From 0c64bdfcbc8dac0ff472c9bfec20796e6baf8f3a Mon Sep 17 00:00:00 2001
From: Simon Pasquier
Date: Fri, 10 Jan 2020 12:32:44 +0100
Subject: [PATCH] Update vendor/ directory

Signed-off-by: Simon Pasquier
---
 .../providers/darwin/defs_darwin.go           |    69 -
 .../miekg/dns/duplicate_generate.go           |   144 -
 vendor/github.com/miekg/dns/msg_generate.go   |   328 -
 vendor/github.com/miekg/dns/types_generate.go |   285 -
 .../minio/minio-go/v6/functional_tests.go     | 10068 ----------------
 vendor/golang.org/x/net/internal/iana/gen.go  |   383 -
 .../x/net/internal/socket/defs_aix.go         |    38 -
 .../x/net/internal/socket/defs_darwin.go      |    36 -
 .../x/net/internal/socket/defs_dragonfly.go   |    36 -
 .../x/net/internal/socket/defs_freebsd.go     |    36 -
 .../x/net/internal/socket/defs_linux.go       |    40 -
 .../x/net/internal/socket/defs_netbsd.go      |    38 -
 .../x/net/internal/socket/defs_openbsd.go     |    36 -
 .../x/net/internal/socket/defs_solaris.go     |    36 -
 vendor/golang.org/x/net/ipv4/defs_aix.go      |    39 -
 vendor/golang.org/x/net/ipv4/defs_darwin.go   |    77 -
 .../golang.org/x/net/ipv4/defs_dragonfly.go   |    38 -
 vendor/golang.org/x/net/ipv4/defs_freebsd.go  |    75 -
 vendor/golang.org/x/net/ipv4/defs_linux.go    |   122 -
 vendor/golang.org/x/net/ipv4/defs_netbsd.go   |    37 -
 vendor/golang.org/x/net/ipv4/defs_openbsd.go  |    37 -
 vendor/golang.org/x/net/ipv4/defs_solaris.go  |    84 -
 vendor/golang.org/x/net/ipv4/gen.go           |   199 -
 vendor/golang.org/x/net/ipv6/defs_aix.go      |    82 -
 vendor/golang.org/x/net/ipv6/defs_darwin.go   |   112 -
 .../golang.org/x/net/ipv6/defs_dragonfly.go   |    84 -
 vendor/golang.org/x/net/ipv6/defs_freebsd.go  |   105 -
 vendor/golang.org/x/net/ipv6/defs_linux.go    |   147 -
 vendor/golang.org/x/net/ipv6/defs_netbsd.go   |    80 -
 vendor/golang.org/x/net/ipv6/defs_openbsd.go  |    89 -
 vendor/golang.org/x/net/ipv6/defs_solaris.go  |   114 -
 vendor/golang.org/x/net/ipv6/gen.go           |   199 -
 vendor/golang.org/x/net/publicsuffix/gen.go   |   717 --
 vendor/golang.org/x/sys/unix/mkasm_darwin.go  |    61 -
 vendor/golang.org/x/sys/unix/mkpost.go        |   106 -
 vendor/golang.org/x/sys/unix/mksyscall.go     |   407 -
 .../x/sys/unix/mksyscall_aix_ppc.go           |   404 -
 .../x/sys/unix/mksyscall_aix_ppc64.go         |   602 -
 .../x/sys/unix/mksyscall_solaris.go           |   335 -
 vendor/golang.org/x/sys/unix/mksysnum.go      |   190 -
 vendor/golang.org/x/sys/unix/types_aix.go     |   236 -
 vendor/golang.org/x/sys/unix/types_darwin.go  |   283 -
 .../golang.org/x/sys/unix/types_dragonfly.go  |   263 -
 vendor/golang.org/x/sys/unix/types_freebsd.go |   356 -
 vendor/golang.org/x/sys/unix/types_netbsd.go  |   289 -
 vendor/golang.org/x/sys/unix/types_openbsd.go |   282 -
 vendor/golang.org/x/sys/unix/types_solaris.go |   266 -
 .../golang.org/x/text/feature/plural/gen.go   |   525 -
 .../x/text/feature/plural/gen_common.go       |    74 -
 .../x/text/internal/language/compact/gen.go   |    64 -
 .../internal/language/compact/gen_index.go    |   113 -
 .../internal/language/compact/gen_parents.go  |    54 -
 .../x/text/internal/language/gen.go           |  1520 ---
 .../x/text/internal/language/gen_common.go    |    20 -
 .../golang.org/x/text/internal/number/gen.go  |   458 -
 .../x/text/internal/number/gen_common.go      |    59 -
 vendor/golang.org/x/text/language/gen.go      |   305 -
 vendor/golang.org/x/text/unicode/bidi/gen.go  |   133 -
 .../x/text/unicode/bidi/gen_ranges.go         |    57 -
 .../x/text/unicode/bidi/gen_trieval.go        |    64 -
 .../x/text/unicode/norm/maketables.go         |   986 --
 .../golang.org/x/text/unicode/norm/triegen.go |   117 -
 vendor/modules.txt                            |   414 +-
 63 files changed, 207 insertions(+), 22846 deletions(-)
 delete mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/defs_darwin.go
 delete mode 100644 vendor/github.com/miekg/dns/duplicate_generate.go
 delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go
 delete mode 100644 vendor/github.com/miekg/dns/types_generate.go
 delete mode 100644 vendor/github.com/minio/minio-go/v6/functional_tests.go
 delete mode 100644 vendor/golang.org/x/net/internal/iana/gen.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_aix.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go
 delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_aix.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go
 delete mode 100644 vendor/golang.org/x/net/ipv4/gen.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_aix.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go
 delete mode 100644 vendor/golang.org/x/net/ipv6/gen.go
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go
 delete mode 100644 vendor/golang.org/x/text/feature/plural/gen.go
 delete mode 100644 vendor/golang.org/x/text/feature/plural/gen_common.go
 delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go
 delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go
 delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go
 delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go
 delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go
 delete mode 100644 vendor/golang.org/x/text/internal/number/gen.go
 delete mode 100644 vendor/golang.org/x/text/internal/number/gen_common.go
 delete mode 100644 vendor/golang.org/x/text/language/gen.go
 delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go
 delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
 delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go

diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/defs_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/defs_darwin.go
deleted file mode 100644
index 1ca06c638d..0000000000
--- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/defs_darwin.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-// +build ignore
-
-package darwin
-
-/*
-#include
-#include
-*/
-import "C"
-
-type processState uint32
-
-const (
-	stateSIDL processState = iota + 1
-	stateRun
-	stateSleep
-	stateStop
-	stateZombie
-)
-
-const argMax = C.ARG_MAX
-
-type bsdInfo C.struct_proc_bsdinfo
-
-type procTaskInfo C.struct_proc_taskinfo
-
-type procTaskAllInfo C.struct_proc_taskallinfo
-
-type vinfoStat C.struct_vinfo_stat
-
-type fsid C.struct_fsid
-
-type vnodeInfo C.struct_vnode_info
-
-type vnodeInfoPath C.struct_vnode_info_path
-
-type procVnodePathInfo C.struct_proc_vnodepathinfo
-
-type vmStatisticsData C.vm_statistics_data_t
-
-type vmStatistics64Data C.vm_statistics64_data_t
-
-type vmSize C.vm_size_t
-
-const (
-	cpuStateUser   = C.CPU_STATE_USER
-	cpuStateSystem = C.CPU_STATE_SYSTEM
-	cpuStateIdle   = C.CPU_STATE_IDLE
-	cpuStateNice   = C.CPU_STATE_NICE
-)
-
-type hostCPULoadInfo C.host_cpu_load_info_data_t
diff --git a/vendor/github.com/miekg/dns/duplicate_generate.go b/vendor/github.com/miekg/dns/duplicate_generate.go
deleted file mode 100644
index 9b7a71b16e..0000000000
--- a/vendor/github.com/miekg/dns/duplicate_generate.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//+build ignore
-
-// types_generate.go is meant to run with go generate. It will use
-// go/{importer,types} to track down all the RR struct types. Then for each type
-// it will generate conversion tables (TypeToRR and TypeToString) and banal
-// methods (len, Header, copy) based on the struct tags. The generated source is
-// written to ztypes.go, and is meant to be checked into git.
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"go/format"
-	"go/importer"
-	"go/types"
-	"log"
-	"os"
-)
-
-var packageHdr = `
-// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
- -package dns - -` - -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - - if name == "PrivateRR" || name == "OPT" { - continue - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate the duplicate check for each type. - fmt.Fprint(b, "// isDuplicate() functions\n\n") - for _, name := range namedTypes { - - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name) - fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name) - fmt.Fprint(b, "if !ok { return false }\n") - fmt.Fprint(b, "_ = r2\n") - for i := 1; i < st.NumFields(); i++ { - field := st.Field(i).Name() - o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } - o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) } - - // For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are - // *indirectly* defined as a slice in the net package). - if _, ok := st.Field(i).Type().(*types.Slice); ok { - o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") - - if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { - o3(`for i := 0; i < len(r1.%s); i++ { - if !isDuplicateName(r1.%s[i], r2.%s[i]) { - return false - } - }`) - - continue - } - - o3(`for i := 0; i < len(r1.%s); i++ { - if r1.%s[i] != r2.%s[i] { - return false - } - }`) - - continue - } - - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"a"`, `dns:"aaaa"`: - o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}") - case `dns:"cdomain-name"`, `dns:"domain-name"`: - o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}") - default: - o2("if r1.%s != r2.%s {\nreturn false\n}") - } - } - fmt.Fprintf(b, "return true\n}\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zduplicate.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 721a0fce32..0000000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,328 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. 
-package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// Code generated by "go run msg_generate.go"; DO NOT EDIT. - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case 
st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"any"`: - o("off, err = packStringAny(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name) - fmt.Fprint(b, `rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"any"`: - o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return off, nil - } -`) - } - } - fmt.Fprintf(b, "return off, nil }\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
-func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index 8cda2a74c5..0000000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,285 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, - "CSYNC": {}, -} - -var packageHdr = ` -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n") - case `dns:"domain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n") - case `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, true)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, false)\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored - o("l += len(rr.%s)/2\n") - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2\n") - case st.Tag(i) == `dns:"any"`: - o("l += len(rr.%s)\n") - case st.Tag(i) == 
`dns:"a"`: - o("if len(rr.%s) != 0 { l += net.IPv4len }\n") - case st.Tag(i) == `dns:"aaaa"`: - o("if len(rr.%s) != 0 { l += net.IPv6len }\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"rr.Hdr"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - // For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element. - if t == "EDNS0" { - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/minio/minio-go/v6/functional_tests.go b/vendor/github.com/minio/minio-go/v6/functional_tests.go deleted file mode 100644 index d3d116f4eb..0000000000 --- a/vendor/github.com/minio/minio-go/v6/functional_tests.go +++ /dev/null @@ -1,10068 +0,0 @@ -// +build ignore - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "mime/multipart" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - humanize "github.com/dustin/go-humanize" - log "github.com/sirupsen/logrus" - - "github.com/minio/minio-go/v6" - "github.com/minio/minio-go/v6/pkg/encrypt" -) - -const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" -const ( - letterIdxBits = 6 // 6 bits to represent a letter index - letterIdxMask = 1<= 0; { - if remain == 0 { - cache, remain = src.Int63(), letterIdxMax - } - if idx := int(cache & letterIdxMask); idx < len(letterBytes) { - b[i] = letterBytes[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - return prefix + string(b[0:30-len(prefix)]) -} - -var dataFileMap = map[string]int{ - "datafile-1-b": 1, - "datafile-10-kB": 10 * humanize.KiByte, - "datafile-33-kB": 33 * humanize.KiByte, - "datafile-100-kB": 100 * humanize.KiByte, - "datafile-1.03-MB": 1056 * humanize.KiByte, - "datafile-1-MB": 1 * humanize.MiByte, - "datafile-5-MB": 5 * humanize.MiByte, - "datafile-6-MB": 6 * humanize.MiByte, - "datafile-11-MB": 11 * humanize.MiByte, - "datafile-129-MB": 129 * humanize.MiByte, -} - -func isFullMode() bool { - return os.Getenv("MINT_MODE") == "full" -} - -func getFuncName() string { - pc, _, _, _ := runtime.Caller(1) - return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") -} - -// Tests bucket re-create errors. -func testMakeBucketError() { - region := "eu-central-1" - - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket Failed", err) - return - } - if err = c.MakeBucket(bucketName, region); err == nil { - logError(testName, function, args, startTime, "", "Bucket already exists", err) - return - } - // Verify valid error response from server. 
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testMetadataSizeLimit() { - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts.UserMetadata": "", - } - rand.Seed(startTime.Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - const HeaderSizeLimit = 8 * 1024 - const UserMetadataLimit = 2 * 1024 - - // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail - metadata := make(map[string]string) - metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) - return - } - - // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail - metadata = make(map[string]string) - metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. 
-func testMakeBucketRegions() { - region := "eu-central-1" - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - region = "us-west-2" - args["region"] = region - if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectReadAt() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "objectContentType", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object content type - objectContentType := "binary/octet-stream" - args["objectContentType"] = objectContentType - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Get Object failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat Object failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) - return - } - if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content types don't match", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectWithMetadata() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object custom metadata - customContentType := "custom/contenttype" - - args["metadata"] = map[string][]string{ - "Content-Type": {customContentType}, - "X-Amz-Meta-CustomKey": {"extra spaces in value"}, - } - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithContentLanguage() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - data := bytes.Repeat([]byte("a"), int(0)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en", - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err) - return - } - - objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objInfo.Metadata.Get("Content-Language") != "en" { - logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with streaming signature. -func testPutObjectStreaming() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload an object. - sizes := []int64{0, 64*1024 - 1, 64 * 1024} - - for _, size := range sizes { - data := bytes.Repeat([]byte("a"), int(size)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - - if n != size { - logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object seeker from the end, using whence set to '2'. 
-func testGetObjectSeekEnd() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - pos, err := r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect position", err) - return - } - buf2 := make([]byte, 100) - m, err := io.ReadFull(r, buf2) - if err != nil { - logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) - return - } - hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) - hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) - if hexBuf1 != hexBuf2 { - logError(testName, function, args, startTime, "", "Values at same index dont match", err) - return - } - pos, err = r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect 
position", err) - return - } - if err = r.Close(); err != nil { - logError(testName, function, args, startTime, "", "ObjectClose failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwice() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test RemoveObjectsWithContext request context cancels after timeout -func testRemoveObjectsWithContext() { - // Initialize logging params. 
- startTime := time.Now() - testName := getFuncName() - function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current tie. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - - // Generate put data. - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 20 objects. - nrObjects := 20 - objectsCh := make(chan string) - go func() { - defer close(objectsCh) - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- objectName - } - }() - // Set context to cancel in 1 nanosecond. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Call RemoveObjectsWithContext API with short timeout. - errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) - // Check for error. - select { - case r := <-errorCh: - if r.Err == nil { - logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err) - return - } - } - // Set context with longer timeout. - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed. - errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) - select { - case r, more := <-errorCh: - if more || r.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - // Delete all objects and buckets. - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test removing multiple objects with Remove API -func testRemoveMultipleObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 1100 objects - nrObjects := 200 - - objectsCh := make(chan string) - - go func() { - defer close(objectsCh) - // Upload objects and send them to objectsCh - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- objectName - } - }() - - // Call RemoveObjects API - errorCh := c.RemoveObjects(bucketName, objectsCh) - - // Check if errorCh doesn't receive any error - select { - case r, more := <-errorCh: - if more { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject of a big file to trigger multipart -func testFPutObjectMultipart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getMintDataDirFilePath("datafile-129-MB") - if fileName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - fileName = file.Name() - args["fileName"] = fileName - } - totalSize := dataFileMap["datafile-129-MB"] - // Set base object name - objectName := bucketName + "FPutObject" + "-standard" - args["objectName"] = objectName - - objectContentType := "testapplication/octet-stream" - args["objectContentType"] = objectContentType - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", err) - return - } - if objInfo.Size != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) - return - } - if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType doesn't match", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject with null contentType (default = application/octet-stream) -func testFPutObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - - // Make a new bucket. - args["bucketName"] = bucketName - args["location"] = location - function = "MakeBucket()bucketName, location" - err = c.MakeBucket(bucketName, location) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. 
- // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-129-MB") - if fName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. - if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - totalSize := dataFileMap["datafile-129-MB"] - - // Set base object name - function = "FPutObject(bucketName, objectName, fileName, opts)" - objectName := bucketName + "FPutObject" - args["objectName"] = objectName + "-standard" - args["fileName"] = fName - args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) - return - } - srcFile, err := os.Open(fName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - defer srcFile.Close() - // Add extension to temp file name - tmpFile, err := os.Create(fName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File create failed", err) - return - } - defer tmpFile.Close() - _, err = io.Copy(tmpFile, srcFile) - if err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-GTar" - args["opts"] = minio.PutObjectOptions{} - n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) - return - } - - // Check headers - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-standard" - rStandard, err := c.StatObject(bucketName, 
objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-Octet" - rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-GTar" - rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - if err = os.Remove(fName + ".gtar"); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObjectWithContext request context cancels after timeout -func testFPutObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - totalSize := dataFileMap["datafile-1-MB"] - - // Set base object name - objectName := bucketName + "FPutObjectWithContext" - args["objectName"] = objectName - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed - n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) - return - } - - _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Tests FPutObjectWithContext request context cancels after timeout -func testFPutObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{ContentType:objectContentType}", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "Temp file creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - totalSize := dataFileMap["datafile-1-MB"] - - // Set base object name - objectName := bucketName + "FPutObjectWithContext" - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed - n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err) - return - } - - _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test validates putObject with context to see if request cancellation is honored. -func testPutObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "opts": "", - } - // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} - defer cancel() - - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - // Generic seek error for errors other than io.EOF - seekErr := errors.New("seek error") - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, seekErr, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, seekErr, false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - // We expect an error - if testCase.err == seekErr && err == nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // We expect a specific error - if testCase.err != seekErr && 
testCase.err != err { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // If we expect an error go to the next loop - if testCase.err != nil { - continue - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. 
- m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. 
- _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test Presigned Post Policy -func testPresignedPostPolicy() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PresignedPostPolicy(policy)" - args := map[string]interface{}{ - "policy": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - // Azure requires the key to not start with a number - metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") - metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); 
err == nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - return - } - - policy.SetBucket(bucketName) - policy.SetKey(objectName) - policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days - policy.SetContentType("binary/octet-stream") - policy.SetContentLengthRange(10, 1024*1024) - policy.SetUserMetadata(metadataKey, metadataValue) - args["policy"] = policy.String() - - presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) - return - } - - var formBuf bytes.Buffer - writer := multipart.NewWriter(&formBuf) - for k, v := range formData { - writer.WriteField(k, v) - } - - // Get a 33KB file to upload and test if set post policy works - var filePath = getMintDataDirFilePath("datafile-33-kB") - if filePath == "" { - // Make a temp file with 33 KB data. - file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - filePath = file.Name() - } - - // add file to post request - f, err := os.Open(filePath) - defer f.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - w, err := writer.CreateFormFile("file", filePath) - if err != nil { - logError(testName, function, args, startTime, "", "CreateFormFile failed", err) - return - } - - _, err = io.Copy(w, f) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - writer.Close() - - // make post request with correct form data - res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes())) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) - return - } - - // expected path should be absolute path of the object - var scheme string - if mustParseBool(os.Getenv(enableHTTPS)) { - scheme = "https://" - } else { - scheme = "http://" - } - - expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName - expectedLocationBucketDNS := scheme + bucketName + "." 
+ os.Getenv(serverEndpoint) + "/" + objectName - - if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { - logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) - return - } - } else { - logError(testName, function, args, startTime, "", "Location not found in header response", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(dst, src)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(bucketName+"-copy", "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) - args["src"] = src - - // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return - } - - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) - args["dst"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // Perform the Copy - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) - return - } - - // Close all the get readers before proceeding with CopyObject operations. - r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return - } - - // Perform the Copy which should fail - err = c.CopyObject(dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - // Perform the Copy which should update only metadata. 
- src = minio.NewSourceInfo(bucketName, objectName, nil) - dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{ - "Copy": "should be same", - }) - args["dst"] = dst - args["src"] = src - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) - return - } - - oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - stOpts := minio.StatObjectOptions{} - stOpts.SetMatchETag(oi.ETag) - objInfo, err = c.StatObject(bucketName, objectName, stOpts) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) - return - } - - if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { - logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - if err = cleanupBucket(bucketName+"-copy", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderSeeker interface methods. -func testSSECEncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - - // Generate 129MiB of data. 
- bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := 
r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderSeeker interface methods. -func testSSES3EncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - - // Generate 129MiB of data. 
- bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. 
- logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderAt interface methods. -func testSSECEncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 129MiB of data. 
- bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) 
- return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderAt interface methods. -func testSSES3EncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 129MiB of data. 
- bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - 
return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// testSSECEncryptionPutGet tests encryption with customer provided encryption keys -func testSSECEncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestEncryptionFPut tests encryption with customer specified encryption keys -func testSSECEncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// testSSES3EncryptionPutGet tests SSE-S3 encryption -func 
testSSES3EncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back without any encryption headers - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestSSES3EncryptionFPut 
tests server side encryption -func testSSES3EncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testBucketNotification() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "SetBucketNotification(bucketName)" - args := map[string]interface{}{ - "bucketName": "", - } - - if os.Getenv("NOTIFY_BUCKET") == "" || - os.Getenv("NOTIFY_SERVICE") == "" || - os.Getenv("NOTIFY_REGION") == "" || - os.Getenv("NOTIFY_ACCOUNTID") == "" || - os.Getenv("NOTIFY_RESOURCE") == "" { - ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := os.Getenv("NOTIFY_BUCKET") - args["bucketName"] = bucketName - - topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) - queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") - - topicConfig := minio.NewNotificationConfig(topicArn) - - topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) - topicConfig.AddFilterSuffix("jpg") - - queueConfig := minio.NewNotificationConfig(queueArn) - queueConfig.AddEvents(minio.ObjectCreatedAll) - queueConfig.AddFilterPrefix("photos/") - - bNotification := minio.BucketNotification{} - bNotification.AddTopic(topicConfig) - - // Add the same topicConfig again, should have no effect - // because it is duplicated - bNotification.AddTopic(topicConfig) - if len(bNotification.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Duplicate entry added", err) - return - } - - // Add and remove a queue config - bNotification.AddQueue(queueConfig) - bNotification.RemoveQueueByArn(queueArn) - - err = c.SetBucketNotification(bucketName, bNotification) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) - return - } - - bNotification, err = c.GetBucketNotification(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) - return - } - - if len(bNotification.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Topic config is empty", err) - return - } - - if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { - logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) - return - } - - err = c.RemoveAllBucketNotification(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests comprehensive list of all methods. -func testFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctional()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket. - function = "MakeBucket(bucketName, region)" - functionAll = "MakeBucket(bucketName, region)" - args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") - - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File creation failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "File write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. - var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find the bucket", err) - return - } - - // Asserting the default bucket policy. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - nilPolicy, err := c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - if nilPolicy != "" { - logError(testName, function, args, startTime, "", "policy should be set to nil", err) - return - } - - // Set the bucket policy to 'public readonly'. - function = "SetBucketPolicy(bucketName, readOnlyPolicy)" - functionAll += ", " + function - - readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readOnlyPolicy, - } - - err = c.SetBucketPolicy(bucketName, readOnlyPolicy) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readonly`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public writeonly'. - function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" - functionAll += ", " + function - - writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": writeOnlyPolicy, - } - err = c.SetBucketPolicy(bucketName, writeOnlyPolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `writeonly`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public read/write'. 
- function = "SetBucketPolicy(bucketName, readWritePolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readwrite`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // List all buckets. - function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets() - - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. - if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("f"), 1<<19) - - function = "PutObject(bucketName, objectName, reader, contentType)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-nolength", - "contentType": "binary/octet-stream", - } - - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. 
- - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - objFound = false - isRecursive = true // Recursive is true. - function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - incompObjNotFound := true - - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) - return - } - newReader.Close() - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject failed", err) - return - } - - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) - return - } - - // Generate presigned HEAD object url. 
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - // Verify if presigned url works. - resp, err := http.Head(presignedHeadURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - resp.Body.Close() - - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedGetObject success", err) - return - } - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - "reqParams": reqParams, - } - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. 
- resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) - return - } - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedPutObject success", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - buf = bytes.Repeat([]byte("g"), 1<<19) - - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) - return - } - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "RemoveObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - err = c.RemoveObject(bucketName, objectName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - args["objectName"] = objectName + "-f" - err = c.RemoveObject(bucketName, objectName+"-f") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-nolength" - err = c.RemoveObject(bucketName, objectName+"-nolength") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-presigned" - err = c.RemoveObject(bucketName, objectName+"-presigned") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - function = "RemoveBucket(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - err = c.RemoveBucket(bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - err = c.RemoveBucket(bucketName) - if err == nil { - logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) - return - } - if err.Error() != "The specified bucket does not exist" { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test for validating GetObject Reader* methods functioning when the -// object is modified in the object store. -func testGetObjectModified() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - - // Upload an object. - objectName := "myobject" - args["objectName"] = objectName - content := "helloworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - defer c.RemoveObject(bucketName, objectName) - - reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) - return - } - defer reader.Close() - - // Read a few bytes of the object. - b := make([]byte, 5) - n, err := reader.ReadAt(b, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) - return - } - - // Upload different contents to the same object while object is being read. - newContent := "goodbyeworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - // Confirm that a Stat() call in between doesn't change the Object's cached etag. - _, err = reader.Stat() - expectedError := "At least one of the pre-conditions you specified did not hold" - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - // Read again only to find object contents have been modified since last read. - _, err = reader.ReadAt(b, int64(n)) - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject to upload a file seeked at a given offset. -func testPutObjectUploadSeekedObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, fileToUpload, contentType)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileToUpload": "", - "contentType": "binary/octet-stream", - } - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - - var tempfile *os.File - - if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { - tempfile, err = os.Open(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - args["fileToUpload"] = fileName - } else { - tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile create failed", err) - return - } - args["fileToUpload"] = tempfile.Name() - - // Generate 100kB data - if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - defer os.Remove(tempfile.Name()) - - // Seek back to the beginning of the file. - tempfile.Seek(0, 0) - } - var length = 100 * humanize.KiByte - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - offset := length / 2 - if _, err = tempfile.Seek(int64(offset), 0); err != nil { - logError(testName, function, args, startTime, "", "TempFile seek failed", err) - return - } - - n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err) - return - } - tempfile.Close() - - obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer obj.Close() - - n, err = obj.Seek(int64(offset), 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != int64(offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) - return - } - - n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests bucket re-create errors. 
-func testMakeBucketErrorV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - region := "eu-west-1" - args["bucketName"] = bucketName - args["region"] = region - - // Make a new bucket in 'eu-west-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - if err = c.MakeBucket(bucketName, region); err == nil { - logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) - return - } - // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwiceV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject hidden contentType setting -func testFPutObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a temp file with 11*1024*1024 bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) - n, err := io.CopyN(file, r, 11*1024*1024) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Close the file pro-actively for windows. - err = file.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - - // Set base object name - objectName := bucketName + "FPutObject" - args["objectName"] = objectName - args["fileName"] = file.Name() - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - - n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Add extension to temp file name - fileName := file.Name() - err = os.Rename(file.Name(), fileName+".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "Rename failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - args["fileName"] = fileName + ".gtar" - - n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Check headers - rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) - return - } - - rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - 
logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) - return - } - - rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - err = os.Remove(fileName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegionsV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil { - args["bucketName"] = bucketName + ".withperiod" - args["region"] = "us-west-2" - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data. - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - n, err = r.Seek(offset, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - n, err = r.Seek(0, 1) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - _, err = r.Seek(offset, 2) - if err == nil { - logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) - return - } - n, err = r.Seek(-offset, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != st.Size-offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) - return - } - - var buffer1 bytes.Buffer - if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy 
failed", err) - return - } - } - if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Seek again and read again. - n, err = r.Seek(offset-1, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != (offset - 1) { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) - return - } - - var buffer2 bytes.Buffer - if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - // Verify now lesser bytes. - if !bytes.Equal(buf[2047:], buffer2.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - - // Read directly - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - m, err := r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. 
- m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(bucketName+"-copy", "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - r.Close() - - // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) - args["source"] = src - - // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return - } - - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) - args["destination"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // Perform the Copy - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) - return - } - - // Close all the readers. 
- r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return - } - - // Perform the Copy which should fail - err = c.CopyObject(dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - if err = cleanupBucket(bucketName+"-copy", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectErrorCasesWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Test that more than 10K source objects cannot be - // concatenated. - srcArr := [10001]minio.SourceInfo{} - srcSlice := srcArr[:] - dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - args["destination"] = dst - // Just explain about srcArr in args["sourceList"] - // to stop having 10,001 null headers logged - args["sourceList"] = "source array of 10,001 elements" - if err := c.ComposeObject(dst, srcSlice); err == nil { - logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) - return - } else if err.Error() != "There must be as least one and up to 10000 source objects." { - logError(testName, function, args, startTime, "", "Got unexpected error", err) - return - } - - // Create a source with invalid offset spec and check that - // error is returned: - // 1. Create the source object. - const badSrcSize = 5 * 1024 * 1024 - buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // 2. Set invalid range spec on the object (going beyond - // object size) - badSrc := minio.NewSourceInfo(bucketName, "badObject", nil) - err = badSrc.SetRange(1, badSrcSize) - if err != nil { - logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err) - return - } - // 3. ComposeObject call should fail. 
- if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { - logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) - return - } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { - logError(testName, function, args, startTime, "", "Got invalid error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCasesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -func testComposeMultipleSources(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{ - "destination": "", - "sourceList": "", - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload a small source object - const srcSize = 1024 * 1024 * 5 - buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // We will append 10 copies of the object. - srcs := []minio.SourceInfo{} - for i := 0; i < 10; i++ { - srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) - } - // make the last part very small - err = srcs[9].SetRange(0, 0) - if err != nil { - logError(testName, function, args, startTime, "", "SetRange failed", err) - return - } - args["sourceList"] = srcs - - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) - args["destination"] = dst - - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - err = c.ComposeObject(dst, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objProps.Size != 9*srcSize+1 { - logError(testName, function, args, startTime, "", "Size mismatched! 
Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test concatenating multiple objects objects -func testCompose10KSourcesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -func testEncryptedEmptyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) - - // 1. create an sse-c encrypted object to copy by uploading - const srcSize = 0 - var buf []byte // Empty buffer - args["objectName"] = "object" - _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - // 2. Test CopyObject for an empty object - dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - srcInfo := minio.NewSourceInfo(bucketName, "object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) - return - } - - // 3. 
Test Key rotation - newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) - dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) - return - } - - // 4. Download the object. - reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) - return - } - // Delete all objects and buckets - delete(args, "objectName") - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - var srcEncryption, dstEncryption encrypt.ServerSide - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // 1. create an sse-c encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ServerSideEncryption: sseSrc, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - if sseSrc != nil && sseSrc.Type() != encrypt.S3 { - srcEncryption = sseSrc - } - - // 2. copy object and change encryption key - src := minio.NewSourceInfo(bucketName, "srcObject", srcEncryption) - args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - if sseDst != nil && sseDst.Type() != encrypt.S3 { - dstEncryption = sseDst - } - // 3. 
get copied object and check if content is equal - coreClient := minio.Core{c} - reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test key rotation for source object in-place. - var newSSE encrypt.ServerSide - if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { - newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key - } - if sseSrc != nil && sseSrc.Type() == encrypt.S3 { - newSSE = encrypt.NewSSE() - } - if newSSE != nil { - dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Get copied object and check if content is equal - reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - // Test in-place decryption. 
- dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - src = minio.NewSourceInfo(bucketName, "srcObject", newSSE) - args["source"] = src - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) - return - } - } - - // Get copied decrypted object and check if content is equal - reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test encrypted copy object -func testUnencryptedToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -func testDecryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" - if err = c.MakeBucket(bucketName, "us-east-1"); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ - ServerSideEncryption: encryption, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption)) - args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", 
"NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - if err = c.CopyObject(dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testSSECMultipartEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 6MB of data - buf := bytes.Repeat([]byte("abcdef"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - // Upload a 6MB object using multipart mechanism - uploadID, err := c.NewMultipartUpload(bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - var completeParts []minio.CompletePart - - part, err := c.PutObjectPart(bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - part, err = c.PutObjectPart(bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(bucketName, objectName, uploadID, completeParts) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: srcencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - 
dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err = c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. - metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (6*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 6*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 6*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) - } - - getOpts.SetRange(6*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 6*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:6*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) - } - if getBuf[6*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - 
successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation -func testSSECEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy -func testSSECEncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - var dstencryption encrypt.ServerSide - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy -func testSSECEncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part -func testUnencryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testUnencryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testUnencryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part -func testSSES3EncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcEncryption := encrypt.NewSSE() - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testSSES3EncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testSSES3EncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} -func testUserMetadataCopying() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testUserMetadataCopyingWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - // 1. create a client encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(http.Header) - metadata.Set("x-amz-meta-myheader", "myvalue") - m := make(map[string]string) - m["x-amz-meta-myheader"] = "myvalue" - _, err = c.PutObject(bucketName, "srcObject", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) - return - } - if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 2. create source - src := minio.NewSourceInfo(bucketName, "srcObject", nil) - // 2.1 create destination with metadata set - dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // 3. Check that copying to an object with metadata set resets - // the headers on the copy. - args["source"] = src - args["destination"] = dst1 - err = c.CopyObject(dst1, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders := make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 4. 
create destination with no metadata set and same source - dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - src = minio.NewSourceInfo(bucketName, "srcObject", nil) - - // 5. Check that copying to an object with no metadata set, - // copies metadata. - args["source"] = src - args["destination"] = dst2 - err = c.CopyObject(dst2, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders = metadata - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 6. Compose a pair of sources. - srcs := []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - function = "ComposeObject(destination, sources)" - args["source"] = srcs - args["destination"] = dst3 - err = c.ComposeObject(dst3, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 7. Compose a pair of sources with dest user metadata set. - srcs = []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - function = "ComposeObject(destination, sources)" - args["source"] = srcs - args["destination"] = dst4 - err = c.ComposeObject(dst4, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - expectedHeaders = make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testUserMetadataCopyingV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testStorageClassMetadataPutObject() { - // initialize logging params - 
startTime := time.Now() - function := "testStorageClassMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClass") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - metadata.Set("x-amz-storage-class", "STANDARD") - - _, err = c.PutObject(bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassInvalidMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassInvalidMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassMetadataCopyObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataCopyObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) - - // Put an object with RRS Storage class - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil) - dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil) - c.CopyObject(dst, src) - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClassCopy") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - metadata.Set("x-amz-storage-class", "STANDARD") - - // Put an object 
with Standard Storage class - _, err = c.PutObject(bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil) - dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil) - c.CopyObject(dst, src) - - // Fetch the meta data of copied object - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with size -1 byte object. -func testPutObjectNoLengthV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - objectName := bucketName + "unique" - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - args["size"] = bufSize - - // Upload an object. - n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put objects of unknown size. -func testPutObjectsUnknownV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Issues are revealed by trying to upload multiple files of unknown size - // sequentially (on 4GB machines) - for i := 1; i <= 4; i++ { - // Simulate that we could be receiving byte slices of data that we want - // to upload as a file - rpipe, wpipe := io.Pipe() - defer rpipe.Close() - go func() { - b := []byte("test") - wpipe.Write(b) - wpipe.Close() - }() - - // Upload the object. - objectName := fmt.Sprintf("%sunique%d", bucketName, i) - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - args["size"] = n - if n != int64(4) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err) - return - } - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with 0 byte object. -func testPutObject0ByteV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": 0, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - objectName := bucketName + "unique" - args["objectName"] = objectName - args["opts"] = minio.PutObjectOptions{} - - // Upload an object. 
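The unknown-size uploads above feed PutObject through io.Pipe so the client sees a reader with no known length. A minimal, standalone illustration of that producer/consumer pattern, standard library only and independent of MinIO, might look like this:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
)

func main() {
	// Producer side: whatever is written to wpipe becomes readable from rpipe.
	rpipe, wpipe := io.Pipe()
	go func() {
		_, _ = wpipe.Write([]byte("test"))
		wpipe.Close()
	}()

	// Consumer side (PutObject in the test above): read until EOF without
	// ever knowing the total size in advance.
	data, err := ioutil.ReadAll(rpipe)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("streamed %d bytes\n", len(data))
}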
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCases() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -// Test concatenating 10K objects -func testCompose10KSources() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -// Tests comprehensive list of all methods. -func testFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctionalV2()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - // Make a new bucket. - function = "MakeBucket(bucketName, location)" - functionAll = "MakeBucket(bucketName, location)" - args = map[string]interface{}{ - "bucketName": bucketName, - "location": location, - } - err = c.MakeBucket(bucketName, location) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. - var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) - return - } - - // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, bucketPolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - - // List all buckets. - function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets() - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. - if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) - return - } - - objectNameNoLength := objectName + "-nolength" - args["objectName"] = objectNameNoLength - n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) - return - } - - // Instantiate a done channel to close all listing. 
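The read/write bucket policy above is assembled by string concatenation. One design alternative, sketched here with only the standard library, is to marshal the policy from Go values so that unusual bucket names cannot break the JSON; the structure mirrors the policy used above, and the bucket name is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	bucketName := "example-bucket" // placeholder for the test's random bucket name
	policy := map[string]interface{}{
		"Version": "2012-10-17",
		"Statement": []map[string]interface{}{{
			"Sid":       "",
			"Effect":    "Allow",
			"Principal": map[string][]string{"AWS": {"*"}},
			"Action":    []string{"s3:ListBucketMultipartUploads", "s3:ListBucket"},
			"Resource":  []string{"arn:aws:s3:::" + bucketName},
		}},
	}
	out, err := json.Marshal(policy)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // pass this string to SetBucketPolicy
}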
- doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) - return - } - - incompObjNotFound := true - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FgetObject failed", err) - return - } - - // Generate presigned HEAD object url. - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - // Verify if presigned url works. - resp, err := http.Head(presignedHeadURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "Got empty ETag", err) - return - } - resp.Body.Close() - - // Generate presigned GET object url. 
- function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - // Generate presigned GET object url. - args["reqParams"] = reqParams - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - // Verify content disposition. - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - // Generate data more than 32K - buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - } - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File removes failed", err) - return - } - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test get object with GetObjectWithContext -func testGetObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
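The presigned-PUT upload above is plain HTTP once the URL exists: any client can push bytes to it within the URL's lifetime. A standalone sketch of that step, standard library only, with a hypothetical placeholder URL standing in for the value returned by PresignedPutObject:

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	// Placeholder: substitute the string form of a real presigned PUT URL.
	presignedPutURL := "https://play.min.io/mybucket/myobject?X-Amz-Signature=..."
	payload := bytes.Repeat([]byte("1"), 64*1024)

	req, err := http.NewRequest(http.MethodPut, presignedPutURL, bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}

	// Bound the whole request, mirroring the 30s client timeout used above.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
}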
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) - return - } - - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - - // Read the data back - r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "object Close() call failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObjectWithContext -func testFGetObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
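The context-based tests above rely on a deliberately tiny deadline (one nanosecond) to force cancellation and a generous one to let the call proceed. The same pattern, reduced to the standard library so it can be run in isolation; doWork is a hypothetical stand-in for a context-aware call such as GetObjectWithContext.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// doWork simulates a context-aware client call with a short network round trip.
func doWork(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// A nanosecond deadline expires before any real work can happen.
	shortCtx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	if err := doWork(shortCtx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("short deadline: cancelled as expected")
	}

	// A generous deadline lets the call complete normally.
	longCtx, cancel2 := context.WithTimeout(context.Background(), time.Hour)
	defer cancel2()
	if err := doWork(longCtx); err == nil {
		fmt.Println("long deadline: completed")
	}
}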
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err) - return - } - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object ACLs with GetObjectACL -func testGetObjectACL() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectACL(bucketName, objectName)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add meta data to add a canned acl - metaData := map[string]string{ - "X-Amz-Acl": "public-read-write", - } - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - objectInfo, getObjectACLErr := c.GetObjectACL(bucketName, objectName) - if getObjectACLErr == nil { - logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) - return - } - - s, ok := objectInfo.Metadata["X-Amz-Acl"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "public-read-write" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) - return - } - - bufSize = dataFileMap["datafile-1-MB"] - var reader2 = getDataReader("datafile-1-MB") - defer reader2.Close() - // Save the data - objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add meta data to add a canned acl - metaData = map[string]string{ - "X-Amz-Grant-Read": "id=fooread@minio.go", - "X-Amz-Grant-Write": "id=foowrite@minio.go", - } - - _, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - objectInfo, getObjectACLErr = c.GetObjectACL(bucketName, objectName) - if getObjectACLErr == nil { - logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) - return - } - - if len(objectInfo.Metadata) != 3 { - logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "fooread@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail 
\"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "foowrite@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject with context to see if request cancellation is honored for V2. -func testPutObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - bufSize := dataFileMap["datatfile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - args["ctx"] = ctx - args["size"] = bufSize - defer cancel() - - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with GetObjectWithContext -func testGetObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) - return - } - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", " object Close() call failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObjectWithContext -func testFGetObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - - bufSize := dataFileMap["datatfile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err) - return - } - - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test list object v1 and V2 storage class fields -func testListObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" - args := map[string]interface{}{ - "bucketName": "", - "objectPrefix": "", - "recursive": "true", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName1 := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - _, err = c.PutObject(bucketName, objectName1, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject1 call failed", err) - return - } - - bufSize1 := dataFileMap["datafile-33-kB"] - var reader1 = getDataReader("datafile-33-kB") - defer reader1.Close() - objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - _, err = c.PutObject(bucketName, objectName2, reader1, int64(bufSize1), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject2 call failed", err) - return - } - - // Create a done channel to control 'ListObjects' go routine. - doneCh := make(chan struct{}) - // Exit cleanly upon return. - defer close(doneCh) - - // check for storage-class from ListObjects result - for objInfo := range c.ListObjects(bucketName, "", true, doneCh) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) - return - } - if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } - if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } - } - - // check for storage-class from ListObjectsV2 result - for objInfo := range c.ListObjectsV2(bucketName, "", true, doneCh) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjectsV2 failed unexpectedly", err) - return - } - if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info() - } - if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info() - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Convert string to bool and always return false if any error -func mustParseBool(str string) bool { - b, err := strconv.ParseBool(str) - if err != nil { - return false - } - return b -} - -func main() { - // Output to stdout instead of the default stderr - log.SetOutput(os.Stdout) - // create custom formatter - mintFormatter := mintJSONFormatter{} - // set custom formatter - log.SetFormatter(&mintFormatter) - // log Info or above -- success cases are Info level, 
failures are Fatal level - log.SetLevel(log.InfoLevel) - - tls := mustParseBool(os.Getenv(enableHTTPS)) - kmsEnabled := mustParseBool(os.Getenv(enableKMS)) - // execute tests - if isFullMode() { - testMakeBucketErrorV2() - testGetObjectClosedTwiceV2() - testFPutObjectV2() - testMakeBucketRegionsV2() - testGetObjectReadSeekFunctionalV2() - testGetObjectReadAtFunctionalV2() - testCopyObjectV2() - testFunctionalV2() - testComposeObjectErrorCasesV2() - testCompose10KSourcesV2() - testUserMetadataCopyingV2() - testPutObject0ByteV2() - testPutObjectNoLengthV2() - testPutObjectsUnknownV2() - testGetObjectWithContextV2() - testFPutObjectWithContextV2() - testFGetObjectWithContextV2() - testPutObjectWithContextV2() - testMakeBucketError() - testMakeBucketRegions() - testPutObjectWithMetadata() - testPutObjectReadAt() - testPutObjectStreaming() - testGetObjectSeekEnd() - testGetObjectClosedTwice() - testRemoveMultipleObjects() - testFPutObjectMultipart() - testFPutObject() - testGetObjectReadSeekFunctional() - testGetObjectReadAtFunctional() - testPresignedPostPolicy() - testCopyObject() - testComposeObjectErrorCases() - testCompose10KSources() - testUserMetadataCopying() - testBucketNotification() - testFunctional() - testGetObjectModified() - testPutObjectUploadSeekedObject() - testGetObjectWithContext() - testFPutObjectWithContext() - testFGetObjectWithContext() - - testGetObjectACL() - - testPutObjectWithContext() - testStorageClassMetadataPutObject() - testStorageClassInvalidMetadataPutObject() - testStorageClassMetadataCopyObject() - testPutObjectWithContentLanguage() - testListObjects() - - // SSE-C tests will only work over TLS connection. - if tls { - testSSECEncryptionPutGet() - testSSECEncryptionFPut() - testSSECEncryptedGetObjectReadAtFunctional() - testSSECEncryptedGetObjectReadSeekFunctional() - testEncryptedCopyObjectV2() - testEncryptedSSECToSSECCopyObject() - testEncryptedSSECToUnencryptedCopyObject() - testUnencryptedToSSECCopyObject() - testUnencryptedToUnencryptedCopyObject() - testEncryptedEmptyObject() - testDecryptedCopyObject() - testSSECEncryptedToSSECCopyObjectPart() - testSSECMultipartEncryptedToSSECCopyObjectPart() - testSSECEncryptedToUnencryptedCopyPart() - testUnencryptedToSSECCopyObjectPart() - testUnencryptedToUnencryptedCopyPart() - if kmsEnabled { - testSSES3EncryptionPutGet() - testSSES3EncryptionFPut() - testSSES3EncryptedGetObjectReadAtFunctional() - testSSES3EncryptedGetObjectReadSeekFunctional() - testEncryptedSSECToSSES3CopyObject() - testEncryptedSSES3ToSSECCopyObject() - testEncryptedSSES3ToSSES3CopyObject() - testEncryptedSSES3ToUnencryptedCopyObject() - testUnencryptedToSSES3CopyObject() - testSSECEncryptedToSSES3CopyObjectPart() - testUnencryptedToSSES3CopyObjectPart() - testSSES3EncryptedToSSECCopyObjectPart() - testSSES3EncryptedToUnencryptedCopyPart() - testSSES3EncryptedToSSES3CopyObjectPart() - } - } - } else { - testFunctional() - testFunctionalV2() - } -} diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go deleted file mode 100644 index 2a7661c27b..0000000000 --- a/vendor/golang.org/x/net/internal/iana/gen.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates internet protocol constants and tables by -// reading IANA protocol registries. 
-package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - "strings" -) - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", - parseDSCPRegistry, - }, - { - "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", - parseProtocolNumbers, - }, - { - "https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", - parseAddrFamilyNumbers, - }, -} - -func main() { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") - fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) - os.Exit(1) - } - if err := r.parse(&bb, resp.Body); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile("const.go", b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func parseDSCPRegistry(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var dr dscpRegistry - if err := dec.Decode(&dr); err != nil { - return err - } - fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) - fmt.Fprintf(w, "const (\n") - for _, dr := range dr.escapeDSCP() { - fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value) - fmt.Fprintf(w, "// %s\n", dr.OrigName) - } - for _, er := range dr.escapeECN() { - fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value) - fmt.Fprintf(w, "// %s\n", er.OrigDescr) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type dscpRegistry struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` - Registries []struct { - Title string `xml:"title"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"record"` - } `xml:"registry"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonDSCPRecord struct { - OrigName string - Name string - Value int -} - -func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord { - var drs []canonDSCPRecord - for _, preg := range drr.Registries { - if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") { - continue - } - for _, reg := range preg.Registries { - if !strings.Contains(reg.Title, "Pool 1 Codepoints") { - continue - } - drs = make([]canonDSCPRecord, len(reg.Records)) - sr := strings.NewReplacer( - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, dr := range reg.Records { - s := strings.TrimSpace(dr.Name) - drs[i].OrigName = s - drs[i].Name = sr.Replace(s) - n, err := strconv.ParseUint(dr.Space, 2, 8) - if err != nil { - continue - } - drs[i].Value = int(n) << 2 - } - } - } - return drs -} - -type canonECNRecord struct { - OrigDescr string - Descr 
string - Value int -} - -func (drr *dscpRegistry) escapeECN() []canonECNRecord { - var ers []canonECNRecord - for _, reg := range drr.Registries { - if !strings.Contains(reg.Title, "ECN Field") { - continue - } - ers = make([]canonECNRecord, len(reg.Records)) - sr := strings.NewReplacer( - "Capable", "", - "Not-ECT", "", - "ECT(1)", "", - "ECT(0)", "", - "CE", "", - "(", "", - ")", "", - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, er := range reg.Records { - s := strings.TrimSpace(er.Descr) - ers[i].OrigDescr = s - ss := strings.Split(s, " ") - if len(ss) > 1 { - ers[i].Descr = strings.Join(ss[1:], " ") - } else { - ers[i].Descr = ss[0] - } - ers[i].Descr = sr.Replace(er.Descr) - n, err := strconv.ParseUint(er.Value, 2, 8) - if err != nil { - continue - } - ers[i].Value = int(n) - } - } - return ers -} - -func parseProtocolNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var pn protocolNumbers - if err := dec.Decode(&pn); err != nil { - return err - } - prs := pn.escape() - prs = append([]canonProtocolRecord{{ - Name: "IP", - Descr: "IPv4 encapsulation, pseudo protocol number", - Value: 0, - }}, prs...) - fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) - s := pr.Descr - if s == "" { - s = pr.OrigName - } - fmt.Fprintf(w, "// %s\n", s) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type protocolNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonProtocolRecord struct { - OrigName string - Name string - Descr string - Value int -} - -func (pn *protocolNumbers) escape() []canonProtocolRecord { - prs := make([]canonProtocolRecord, len(pn.Records)) - sr := strings.NewReplacer( - "-in-", "in", - "-within-", "within", - "-over-", "over", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range pn.Records { - if strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "deprecated") { - continue - } - prs[i].OrigName = pr.Name - s := strings.TrimSpace(pr.Name) - switch pr.Name { - case "ISIS over IPv4": - prs[i].Name = "ISIS" - case "manet": - prs[i].Name = "MANET" - default: - prs[i].Name = sr.Replace(s) - } - ss := strings.Split(pr.Descr, "\n") - for i := range ss { - ss[i] = strings.TrimSpace(ss[i]) - } - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} - -func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var afn addrFamilylNumbers - if err := dec.Decode(&afn); err != nil { - return err - } - afrs := afn.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) - fmt.Fprintf(w, "const (\n") - for _, afr := range afrs { - if afr.Name == "" { - continue - } - fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) - fmt.Fprintf(w, "// %s\n", afr.Descr) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type addrFamilylNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - 
Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonAddrFamilyRecord struct { - Name string - Descr string - Value int -} - -func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { - afrs := make([]canonAddrFamilyRecord, len(afn.Records)) - sr := strings.NewReplacer( - "IP version 4", "IPv4", - "IP version 6", "IPv6", - "Identifier", "ID", - "-", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, afr := range afn.Records { - if strings.Contains(afr.Descr, "Unassigned") || - strings.Contains(afr.Descr, "Reserved") { - continue - } - afrs[i].Descr = afr.Descr - s := strings.TrimSpace(afr.Descr) - switch s { - case "IP (IP version 4)": - afrs[i].Name = "IPv4" - case "IP6 (IP version 6)": - afrs[i].Name = "IPv6" - case "AFI for L2VPN information": - afrs[i].Name = "L2VPN" - case "E.164 with NSAP format subaddress": - afrs[i].Name = "E164withSubaddress" - case "MT IP: Multi-Topology IP version 4": - afrs[i].Name = "MTIPv4" - case "MAC/24": - afrs[i].Name = "MACFinal24bits" - case "MAC/40": - afrs[i].Name = "MACFinal40bits" - case "IPv6/64": - afrs[i].Name = "IPv6Initial64bits" - default: - n := strings.Index(s, "(") - if n > 0 { - s = s[:n] - } - n = strings.Index(s, ":") - if n > 0 { - s = s[:n] - } - afrs[i].Name = sr.Replace(s) - } - afrs[i].Value, _ = strconv.Atoi(afr.Value) - } - return afrs -} diff --git a/vendor/golang.org/x/net/internal/socket/defs_aix.go b/vendor/golang.org/x/net/internal/socket/defs_aix.go deleted file mode 100644 index ae1b21c5e1..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_aix.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_darwin.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go deleted file mode 100644 index 85bb7450b0..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include -#include - -#define _GNU_SOURCE -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go deleted file mode 100644 index 5bfdd4676c..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_solaris.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/ipv4/defs_aix.go b/vendor/golang.org/x/net/ipv4/defs_aix.go deleted file mode 100644 index 0f37211c64..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_aix.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - // IP_RECVIF is defined on AIX but doesn't work. - // IP_RECVINTERFACE must be used instead. - sysIP_RECVIF = C.IP_RECVINTERFACE - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go deleted file mode 100644 index c8f2e05b81..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_darwin.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_STRIPHDR = C.IP_STRIPHDR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go deleted file mode 100644 index f30544ea24..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go deleted file mode 100644 index 4dd57d8653..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_freebsd.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_SENDSRCADDR = C.IP_SENDSRCADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_ONESBCAST = C.IP_ONESBCAST - sysIP_BINDANY = C.IP_BINDANY - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_MINTTL = C.IP_MINTTL - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_RECVTOS = C.IP_RECVTOS - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go deleted file mode 100644 index beb11071ad..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_linux.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2014 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_PKTOPTIONS = C.IP_PKTOPTIONS - sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER - sysIP_RECVERR = C.IP_RECVERR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_RECVTOS = C.IP_RECVTOS - sysIP_MTU = C.IP_MTU - sysIP_FREEBIND = C.IP_FREEBIND - sysIP_TRANSPARENT = C.IP_TRANSPARENT - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR - sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR - sysIP_MINTTL = C.IP_MINTTL - sysIP_NODEFRAG = C.IP_NODEFRAG - sysIP_UNICAST_IF = C.IP_UNICAST_IF - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_MSFILTER = C.IP_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL - - //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT - //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT - //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO - //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE - //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE - //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT - - sysICMP_FILTER = C.ICMP_FILTER - - sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE - sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL - sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP - sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 - sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS - sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING - - sysSOL_SOCKET = C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPFilter = C.sizeof_struct_icmp_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type sockExtendedErr C.struct_sock_extended_err - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq 
C.struct_group_source_req - -type icmpFilter C.struct_icmp_filter - -type sockFProg C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go deleted file mode 100644 index 8f8af1b899..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_netbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go deleted file mode 100644 index 8f8af1b899..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_openbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go deleted file mode 100644 index aeb33e9c8f..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_solaris.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVSLLA = C.IP_RECVSLLA - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_NEXTHOP = C.IP_NEXTHOP - - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - sysIP_DONTFRAG = C.IP_DONTFRAG - - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC - sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL - sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF - - sysIP_REUSEADDR = C.IP_REUSEADDR - sysIP_DONTROUTE = C.IP_DONTROUTE - sysIP_BROADCAST = C.IP_BROADCAST - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go deleted file mode 100644 index 1bb1737f67..0000000000 --- a/vendor/golang.org/x/net/ipv4/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. 
-package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", - parseICMPv4Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "package ipv4\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv4Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv4Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigDescr) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv4Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv4ParamRecord struct { - OrigDescr string - Descr string - Value int -} - -func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", 
- ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Descr, "Reserved") || - strings.Contains(pr.Descr, "Unassigned") || - strings.Contains(pr.Descr, "Deprecated") || - strings.Contains(pr.Descr, "Experiment") || - strings.Contains(pr.Descr, "experiment") { - continue - } - ss := strings.Split(pr.Descr, "\n") - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - s := strings.TrimSpace(prs[i].Descr) - prs[i].OrigDescr = s - prs[i].Descr = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/ipv6/defs_aix.go b/vendor/golang.org/x/net/ipv6/defs_aix.go deleted file mode 100644 index ea396a3cb8..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_aix.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go deleted file mode 100644 index 55ddc116fc..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_darwin.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#define __APPLE_USE_RFC_3542 -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go deleted file mode 100644 index a4c383a515..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go deleted file mode 100644 index 53e625389a..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_freebsd.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_BINDANY = C.IPV6_BINDANY - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go deleted file mode 100644 index 3308cb2c38..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_linux.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIPV6_ADDRFORM = C.IPV6_ADDRFORM - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_FLOWINFO = C.IPV6_FLOWINFO - - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP - sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT - sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER - sysIPV6_MTU = C.IPV6_MTU - sysIPV6_RECVERR = C.IPV6_RECVERR - sysIPV6_V6ONLY = C.IPV6_V6ONLY - sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST - sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST - - //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT - //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT - //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO - //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE - //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE - //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT - - sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR - sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES - - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - - sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT - - sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR - sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR - sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT - sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF - - sysICMPV6_FILTER = C.ICMPV6_FILTER - - sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK - sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS - sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS - sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY - - sysSOL_SOCKET = 
C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6FlowlabelReq C.struct_in6_flowlabel_req - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter - -type sockFProg C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go deleted file mode 100644 index be9ceb9cc0..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_netbsd.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go 
b/vendor/golang.org/x/net/ipv6/defs_openbsd.go deleted file mode 100644 index 177ddf87d2..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_openbsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL - sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL - sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL - sysIPSEC6_OUTSA = C.IPSEC6_OUTSA - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_PIPEX = C.IPV6_PIPEX - - sysIPV6_RTABLE = C.IPV6_RTABLE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go deleted file mode 100644 index 0f8ce2b46a..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_solaris.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - - sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_SEC_OPT = C.IPV6_SEC_OPT - sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - - sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK - sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT - sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK - sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT - sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK - sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT - - sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK - - sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC - - sysICMP6_FILTER = C.ICMP6_FILTER - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go deleted file mode 100644 index 5885664fbc..0000000000 --- a/vendor/golang.org/x/net/ipv6/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 
2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", - parseICMPv6Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "package ipv6\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv6Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv6Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigName) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv6Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv6ParamRecord struct { - OrigName string - Name string - Value int -} - -func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { - id := -1 - for i, r := 
range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Name, "Reserved") || - strings.Contains(pr.Name, "Unassigned") || - strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "Experiment") || - strings.Contains(pr.Name, "experiment") { - continue - } - ss := strings.Split(pr.Name, "\n") - if len(ss) > 1 { - prs[i].Name = strings.Join(ss, " ") - } else { - prs[i].Name = ss[0] - } - s := strings.TrimSpace(prs[i].Name) - prs[i].OrigName = s - prs[i].Name = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index 372ffbb24c..0000000000 --- a/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go based on the authoritative -// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat -// -// The version is derived from -// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat -// and a human-readable form is at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" -// and -version "an explicit version string". - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 10 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -const ( - defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" - gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" -) - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - numICANNRules = 0 - - // validSuffixRE is used to check that the entries in the public suffix - // list are in canonical form (after Punycode encoding). 
Specifically, - // capital letters are not allowed. - validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) - dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) - - comments = flag.Bool("comments", false, "generate table.go comments, for debugging") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - if *url != defaultURL { - return fmt.Errorf("-version was not specified, and the -url is not the default one") - } - sha, date, err := gitCommit() - if err != nil { - return err - } - *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - if len(rules) != 0 { - return fmt.Errorf(`expected no rules before "BEGIN ICANN DOMAINS"`) - } - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann, numICANNRules = false, len(rules) - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffixRE.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". 
- default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - if err := generate(printReal, &root, "table.go"); err != nil { - return err - } - if err := generate(printTest, &root, "table_test.go"); err != nil { - return err - } - return nil -} - -func generate(p func(io.Writer, *node) error, root *node, filename string) error { - buf := new(bytes.Buffer) - if err := p(buf, root); err != nil { - return err - } - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - return ioutil.WriteFile(filename, b, 0644) -} - -func gitCommit() (sha, date string, retErr error) { - res, err := http.Get(gitCommitURL) - if err != nil { - return "", "", err - } - if res.StatusCode != http.StatusOK { - return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if m := shaRE.FindSubmatch(b); m != nil { - sha = string(m[1]) - } - if m := dateRE.FindSubmatch(b); m != nil { - date = string(m[1]) - } - if sha == "" || date == "" { - retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) - } - return sha, date, retErr -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nconst numICANNRules = %d\n\nvar rules = [...]string{\n", numICANNRules) - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. 
-const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - if *comments { - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } else { - fmt.Fprintf(w, "0x%x,\n", c) - } - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. 
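The combined-text packing that crush (below) and combineText implement is easiest to see on a tiny input: every label in the public suffix table is stored as an (offset, length) pair into one shared string, so labels that overlap can share bytes. The following is a minimal, greedy sketch of that idea; it is deliberately simpler than the prefix-map merge in the deleted file, and packWithOverlap is an illustrative name, not part of the generator.

package main

import (
	"fmt"
	"strings"
)

// packWithOverlap appends each string, reusing the longest suffix of the
// output that is also a prefix of the next string, and skipping strings that
// are already substrings of the output. Every input stays findable with
// strings.Index in the packed result.
func packWithOverlap(ss []string) string {
	out := ""
	for _, s := range ss {
		if strings.Contains(out, s) {
			continue
		}
		max := 0
		for k := 1; k <= len(s) && k <= len(out); k++ {
			if strings.HasSuffix(out, s[:k]) {
				max = k
			}
		}
		out += s[max:]
	}
	return out
}

func main() {
	packed := packWithOverlap([]string{"abc", "cde", "bcd"})
	fmt.Println(packed)                       // "abcde"
	fmt.Println(strings.Index(packed, "cde")) // 2: still addressable by offset/length
}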
-func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. - // Can there be matches for longer prefix lengths, introduced by the merge? - // I believe that any such matches would by necessity have been eliminated - // during substring removal or merged at a higher prefix length. For - // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" - // would yield "abcde", which could be merged with "bcdef." However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993db..0000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
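What the trampoline generator emits per libc symbol is compact enough to show directly. This runnable sketch reproduces only the per-function emission step that appears in the generator's main loop below; libc_getpid is an example symbol name, and the real program derives the function list by scanning the syscall_darwin*.go sources for *_trampoline declarations.

package main

import (
	"bytes"
	"fmt"
)

// emitTrampoline writes one NOSPLIT assembly stub that jumps straight to the
// named libc symbol, mirroring the Fprintf pair in the generator's loop.
func emitTrampoline(buf *bytes.Buffer, fn string) {
	fmt.Fprintf(buf, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
	fmt.Fprintf(buf, "\tJMP\t%s(SB)\n", fn)
}

func main() {
	var out bytes.Buffer
	emitTrampoline(&out, "libc_getpid")
	fmt.Print(out.String())
	// Output:
	// TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
	//	JMP	libc_getpid(SB)
}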
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index 9feddd00c4..0000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. 
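The rewrites above are plain regexp substitutions over the cgo -godefs output. Here is a self-contained example of the first one (exporting the glibc-private X__val field of Fsid/Sigset_t as Val); the input snippet is invented for illustration rather than taken from a real generated file, but the pattern and replacement are the ones shown above.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A fragment shaped like cgo -godefs output for the Linux Fsid type.
	in := []byte("type Fsid struct {\n\tX__val [2]int32\n}")

	// Same pattern and replacement as mkpost.go: keep the struct layout,
	// rename the private field to an exported one.
	valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`)
	out := valRegex.ReplaceAll(in, []byte("type $1 struct {${2}Val$3}"))

	fmt.Println(string(out))
	// Prints:
	// type Fsid struct {
	//	Val [2]int32
	// }
}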
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424e9..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. 
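The prototype format described above is parsed with a single regular expression further down in this file. As a runnable illustration of what that split produces, the sketch below feeds one example prototype (Fchmod, chosen here purely as an example) through the same pattern and prints the captured name and parameter lists.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	t := "//sys\tFchmod(fd int, mode uint32) (err error)"
	// The generator normalizes whitespace runs to single spaces before matching.
	t = regexp.MustCompile(`\s+`).ReplaceAllString(t, " ")

	// Same shape as the //sys declaration matcher used by the generator.
	re := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`)
	f := re.FindStringSubmatch(t)
	if f == nil {
		panic("malformed //sys declaration")
	}
	fmt.Println("func:", f[2]) // Fchmod
	fmt.Println("in:  ", f[3]) // fd int, mode uint32
	fmt.Println("out: ", f[4]) // err error
}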
-*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
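The slice handling described in the comment above expands, in the generated file, to a small fixed pattern: take the address of the first element when the slice is non-empty, otherwise point at a package-level dummy so calls like write(fd, nil, 0) are never issued with a nil pointer. The standalone sketch below shows that pattern, with an illustrative _zero variable standing in for the one the generated wrappers reference.

package main

import (
	"fmt"
	"unsafe"
)

// _zero plays the role of the dummy allocation the generated wrappers point
// at when a slice argument is empty.
var _zero uintptr

// sliceArgs converts a byte slice into the (pointer, length) pair that would
// be passed on to Syscall.
func sliceArgs(p []byte) (ptr, length uintptr) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	return uintptr(_p0), uintptr(len(p))
}

func main() {
	ptr, n := sliceArgs(nil)
	fmt.Println(ptr != 0, n) // true 0: even an empty slice yields a non-nil pointer
}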
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index f2c58fb7cc..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - - // Assign return values. 
- body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index 45b4429088..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. - // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. 
- // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. 
- if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - callgccgo := fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b6..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
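The header comment of the deleted mksyscall_aix_ppc64.go above only sketches the generated output in the abstract (the "asyscall" example). As a rough, hand-written illustration of the same common/gc split for one concrete //sys line: the prototype below is hypothetical, the var declaration is shown standalone rather than inside the generated var() block, syscallFunc and syscall6 are the helpers declared in srcTemplate2 above, and errnoErr comes from elsewhere in the unix package.

    //sys	Getrlimit(resource int, rlim *Rlimit) (err error)

    // zsyscall_aix_ppc64.go (common part): pointer conversion and error handling.
    func Getrlimit(resource int, rlim *Rlimit) (err error) {
    	_, e1 := callgetrlimit(resource, uintptr(unsafe.Pointer(rlim)))
    	if e1 != 0 {
    		err = errnoErr(e1)
    	}
    	return
    }

    // zsyscall_aix_ppc64_gc.go (gc part): libc symbol import plus a syscall6 dispatch.
    //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o"
    //go:linkname libc_getrlimit libc_getrlimit
    var libc_getrlimit syscallFunc

    func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) {
    	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0)
    	return
    }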
- -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. 
- in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index 07f8960ff3..0000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
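The Solaris generator deleted just above follows the same pattern, but dispatches through sysvicall6 (or rawSysvicall6 for //sysnb) via a per-function proc address variable wired up with //go:cgo_import_dynamic and //go:linkname. A minimal sketch of one generated wrapper, assuming a hypothetical //sys prototype; the real zsyscall_solaris_*.go files group the proc variables into a single var() block:

    //sys	Umask(mask int) (oldmask int)

    // Import the libc symbol and bind it to the proc address variable.
    //go:cgo_import_dynamic libc_umask umask "libc.so"
    //go:linkname procUmask libc_umask
    var procUmask syscallFunc

    func Umask(mask int) (oldmask int) {
    	// One argument, padded out to the six slots sysvicall6 expects.
    	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0)
    	oldmask = int(r0)
    	return
    }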
- -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). -package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if 
t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { - name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 25e834940d..0000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type StTimespec C.struct_st_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - 
-type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692b..0000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - 
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d0..0000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index 7470798951..0000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data 
- -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 2dd4f9542c..0000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 8aafbe4469..0000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f9348..0000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/feature/plural/gen.go b/vendor/golang.org/x/text/feature/plural/gen.go deleted file mode 100644 index 42f2f8676f..0000000000 --- a/vendor/golang.org/x/text/feature/plural/gen.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates data for the CLDR plural rules, as defined in -// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules -// -// We assume a slightly simplified grammar: -// -// condition = and_condition ('or' and_condition)* samples -// and_condition = relation ('and' relation)* -// relation = expr ('=' | '!=') range_list -// expr = operand ('%' '10' '0'* )? -// operand = 'n' | 'i' | 'f' | 't' | 'v' | 'w' -// range_list = (range | value) (',' range_list)* -// range = value'..'value -// value = digit+ -// digit = 0|1|2|3|4|5|6|7|8|9 -// -// samples = ('@integer' sampleList)? -// ('@decimal' sampleList)? -// sampleList = sampleRange (',' sampleRange)* (',' ('…'|'...'))? -// sampleRange = decimalValue ('~' decimalValue)? -// decimalValue = value ('.' value)? -// -// Symbol Value -// n absolute value of the source number (integer and decimals). -// i integer digits of n. -// v number of visible fraction digits in n, with trailing zeros. -// w number of visible fraction digits in n, without trailing zeros. -// f visible fractional digits in n, with trailing zeros. -// t visible fractional digits in n, without trailing zeros. -// -// The algorithm for which the data is generated is based on the following -// observations -// -// - the number of different sets of numbers which the plural rules use to -// test inclusion is limited, -// - most numbers that are tested on are < 100 -// -// This allows us to define a bitmap for each number < 100 where a bit i -// indicates whether this number is included in some defined set i. -// The function matchPlural in plural.go defines how we can subsequently use -// this data to determine inclusion. -// -// There are a few languages for which this doesn't work. For one Italian and -// Azerbaijan, which both test against numbers > 100 for ordinals and Breton, -// which considers whether numbers are multiples of hundreds. 
The model here -// could be extended to handle Italian and Azerbaijan fairly easily (by -// considering the numbers 100, 200, 300, ..., 800, 900 in addition to the first -// 100), but for now it seems easier to just hard-code these cases. - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "log" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/internal/language/compact" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", "tables.go", "output file") - outputTestFile = flag.String("testoutput", "data_test.go", "output file") - - draft = flag.String("draft", - "contributed", - `Minimal draft requirements (approved, contributed, provisional, unconfirmed).`) -) - -func main() { - gen.Init() - - const pkg = "plural" - - gen.Repackage("gen_common.go", "common.go", pkg) - // Read the CLDR zip file. - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - d.SetDirFilter("supplemental", "main") - d.SetSectionFilter("numbers", "plurals") - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile(*outputFile, pkg) - - gen.WriteCLDRVersion(w) - - genPlurals(w, data) - - w = gen.NewCodeWriter() - defer w.WriteGoFile(*outputTestFile, pkg) - - genPluralsTests(w, data) -} - -type pluralTest struct { - locales string // space-separated list of locales for this test - form int // Use int instead of Form to simplify generation. - integer []string // Entries of the form \d+ or \d+~\d+ - decimal []string // Entries of the form \f+ or \f+ +~\f+, where f is \d+\.\d+ -} - -func genPluralsTests(w *gen.CodeWriter, data *cldr.CLDR) { - w.WriteType(pluralTest{}) - - for _, plurals := range data.Supplemental().Plurals { - if plurals.Type == "" { - // The empty type is reserved for plural ranges. - continue - } - tests := []pluralTest{} - - for _, pRules := range plurals.PluralRules { - for _, rule := range pRules.PluralRule { - test := pluralTest{ - locales: pRules.Locales, - form: int(countMap[rule.Count]), - } - scan := bufio.NewScanner(strings.NewReader(rule.Data())) - scan.Split(splitTokens) - var p *[]string - for scan.Scan() { - switch t := scan.Text(); t { - case "@integer": - p = &test.integer - case "@decimal": - p = &test.decimal - case ",", "…": - default: - if p != nil { - *p = append(*p, t) - } - } - } - tests = append(tests, test) - } - } - w.WriteVar(plurals.Type+"Tests", tests) - } -} - -func genPlurals(w *gen.CodeWriter, data *cldr.CLDR) { - for _, plurals := range data.Supplemental().Plurals { - if plurals.Type == "" { - continue - } - // Initialize setMap and inclusionMasks. They are already populated with - // a few entries to serve as an example and to assign nice numbers to - // common cases. - - // setMap contains sets of numbers represented by boolean arrays where - // a true value for element i means that the number i is included. - setMap := map[[numN]bool]int{ - // The above init func adds an entry for including all numbers. - [numN]bool{1: true}: 1, // fix {1} to a nice value - [numN]bool{2: true}: 2, // fix {2} to a nice value - [numN]bool{0: true}: 3, // fix {0} to a nice value - } - - // inclusionMasks contains bit masks for every number under numN to - // indicate in which set the number is included. Bit 1 << x will be set - // if it is included in set x. 
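As a self-contained aside, a minimal sketch of the inclusion-mask scheme described above; the sets used here (all numbers, {1}, multiples of 10) are invented for illustration and are not the generator's real sets:

    package main

    import "fmt"

    // masks[i] has bit s set when the number i belongs to set s.
    var masks [100]uint64

    func init() {
        for i := range masks {
            masks[i] |= 1 << 0 // set 0: all numbers 0..99
            if i == 1 {
                masks[i] |= 1 << 1 // set 1: {1}
            }
            if i%10 == 0 {
                masks[i] |= 1 << 2 // set 2: multiples of 10
            }
        }
    }

    // inSet reports whether n, reduced modulo 100, is a member of set s.
    func inSet(n int, s uint) bool {
        return masks[n%100]&(1<<s) != 0
    }

    func main() {
        fmt.Println(inSet(21, 1)) // false: 21 is not in {1}
        fmt.Println(inSet(40, 2)) // true: 40 is a multiple of 10
    }
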
- inclusionMasks := [numN]uint64{ - // Note: these entries are not complete: more bits will be set along the way. - 0: 1 << 3, - 1: 1 << 1, - 2: 1 << 2, - } - - // Create set {0..99}. We will assign this set the identifier 0. - var all [numN]bool - for i := range all { - // Mark number i as being included in the set (which has identifier 0). - inclusionMasks[i] |= 1 << 0 - // Mark number i as included in the set. - all[i] = true - } - // Register the identifier for the set. - setMap[all] = 0 - - rules := []pluralCheck{} - index := []byte{0} - langMap := map[compact.ID]byte{0: 0} - - for _, pRules := range plurals.PluralRules { - // Parse the rules. - var conds []orCondition - for _, rule := range pRules.PluralRule { - form := countMap[rule.Count] - conds = parsePluralCondition(conds, rule.Data(), form) - } - // Encode the rules. - for _, c := range conds { - // If an or condition only has filters, we create an entry for - // this filter and the set that contains all values. - empty := true - for _, b := range c.used { - empty = empty && !b - } - if empty { - rules = append(rules, pluralCheck{ - cat: byte(opMod< 0xFF { - log.Fatalf("Too many entries for rules: %#x", len(rules)) - } - if len(index) > 0xFF { - log.Fatalf("Too many entries for index: %#x", len(index)) - } - if len(setMap) > 64 { // maximum number of bits. - log.Fatalf("Too many entries for setMap: %d", len(setMap)) - } - w.WriteComment( - "Slots used for %s: %X of 0xFF rules; %X of 0xFF indexes; %d of 64 sets", - plurals.Type, len(rules), len(index), len(setMap)) - // Prevent comment from attaching to the next entry. - fmt.Fprint(w, "\n\n") - } -} - -type orCondition struct { - original string // for debugging - - form Form - used [32]bool - set [32][numN]bool -} - -func (o *orCondition) add(op opID, mod int, v []int) (ok bool) { - ok = true - for _, x := range v { - if x >= maxMod { - ok = false - break - } - } - for i := 0; i < numN; i++ { - m := i - if mod != 0 { - m = i % mod - } - if !intIn(m, v) { - o.set[op][i] = false - } - } - if ok { - o.used[op] = true - } - return ok -} - -func intIn(x int, a []int) bool { - for _, y := range a { - if x == y { - return true - } - } - return false -} - -var operandIndex = map[string]opID{ - "i": opI, - "n": opN, - "f": opF, - "v": opV, - "w": opW, -} - -// parsePluralCondition parses the condition of a single pluralRule and appends -// the resulting or conditions to conds. -// -// Example rules: -// // Category "one" in English: only allow 1 with no visible fraction -// i = 1 and v = 0 @integer 1 -// -// // Category "few" in Czech: all numbers with visible fractions -// v != 0 @decimal ... -// -// // Category "zero" in Latvian: all multiples of 10 or the numbers 11-19 or -// // numbers with a fraction 11..19 and no trailing zeros. -// n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer ... -// -// @integer and @decimal are followed by examples and are not relevant for the -// rule itself. The are used here to signal the termination of the rule. -func parsePluralCondition(conds []orCondition, s string, f Form) []orCondition { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(splitTokens) - for { - cond := orCondition{original: s, form: f} - // Set all numbers to be allowed for all number classes and restrict - // from here on. - for i := range cond.set { - for j := range cond.set[i] { - cond.set[i][j] = true - } - } - andLoop: - for { - var token string - scan.Scan() // Must exist. 
- switch class := scan.Text(); class { - case "t": - class = "w" // equal to w for t == 0 - fallthrough - case "n", "i", "f", "v", "w": - op := scanToken(scan) - opCode := operandIndex[class] - mod := 0 - if op == "%" { - opCode |= opMod - - switch v := scanUint(scan); v { - case 10, 100: - mod = v - case 1000: - // A more general solution would be to allow checking - // against multiples of 100 and include entries for the - // numbers 100..900 in the inclusion masks. At the - // moment this would only help Azerbaijan and Italian. - - // Italian doesn't use '%', so this must be Azerbaijan. - cond.used[opAzerbaijan00s] = true - return append(conds, cond) - - case 1000000: - cond.used[opBretonM] = true - return append(conds, cond) - - default: - log.Fatalf("Modulo value not supported %d", v) - } - op = scanToken(scan) - } - if op != "=" && op != "!=" { - log.Fatalf("Unexpected op %q", op) - } - if op == "!=" { - opCode |= opNotEqual - } - a := []int{} - v := scanUint(scan) - if class == "w" && v != 0 { - log.Fatalf("Must compare against zero for operand type %q", class) - } - token = scanToken(scan) - for { - switch token { - case "..": - end := scanUint(scan) - for ; v <= end; v++ { - a = append(a, v) - } - token = scanToken(scan) - default: // ",", "or", "and", "@..." - a = append(a, v) - } - if token != "," { - break - } - v = scanUint(scan) - token = scanToken(scan) - } - if !cond.add(opCode, mod, a) { - // Detected large numbers. As we ruled out Azerbaijan, this - // must be the many rule for Italian ordinals. - cond.set[opItalian800] = cond.set[opN] - cond.used[opItalian800] = true - } - - case "@integer", "@decimal": // "other" entry: tests only. - return conds - default: - log.Fatalf("Unexpected operand class %q (%s)", class, s) - } - switch token { - case "or": - conds = append(conds, cond) - break andLoop - case "@integer", "@decimal": // examples - // There is always an example in practice, so we always terminate here. - if err := scan.Err(); err != nil { - log.Fatal(err) - } - return append(conds, cond) - case "and": - // keep accumulating - default: - log.Fatalf("Unexpected token %q", token) - } - } - } -} - -func scanToken(scan *bufio.Scanner) string { - scan.Scan() - return scan.Text() -} - -func scanUint(scan *bufio.Scanner) int { - scan.Scan() - val, err := strconv.ParseUint(scan.Text(), 10, 32) - if err != nil { - log.Fatal(err) - } - return int(val) -} - -// splitTokens can be used with bufio.Scanner to tokenize CLDR plural rules. -func splitTokens(data []byte, atEOF bool) (advance int, token []byte, err error) { - condTokens := [][]byte{ - []byte(".."), - []byte(","), - []byte("!="), - []byte("="), - } - advance, token, err = bufio.ScanWords(data, atEOF) - for _, t := range condTokens { - if len(t) >= len(token) { - continue - } - switch p := bytes.Index(token, t); { - case p == -1: - case p == 0: - advance = len(t) - token = token[:len(t)] - return advance - len(token) + len(t), token[:len(t)], err - case p < advance: - // Don't split when "=" overlaps "!=". - if t[0] == '=' && token[p-1] == '!' { - continue - } - advance = p - token = token[:p] - } - } - return advance, token, err -} diff --git a/vendor/golang.org/x/text/feature/plural/gen_common.go b/vendor/golang.org/x/text/feature/plural/gen_common.go deleted file mode 100644 index 24aa41505a..0000000000 --- a/vendor/golang.org/x/text/feature/plural/gen_common.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Form defines a plural form. -// -// Not all languages support all forms. Also, the meaning of each form varies -// per language. It is important to note that the name of a form does not -// necessarily correspond one-to-one with the set of numbers. For instance, -// for Croation, One matches not only 1, but also 11, 21, etc. -// -// Each language must at least support the form "other". -type Form byte - -const ( - Other Form = iota - Zero - One - Two - Few - Many -) - -var countMap = map[string]Form{ - "other": Other, - "zero": Zero, - "one": One, - "two": Two, - "few": Few, - "many": Many, -} - -type pluralCheck struct { - // category: - // 3..7: opID - // 0..2: category - cat byte - setID byte -} - -// opID identifies the type of operand in the plural rule, being i, n or f. -// (v, w, and t are treated as filters in our implementation.) -type opID byte - -const ( - opMod opID = 0x1 // is '%' used? - opNotEqual opID = 0x2 // using "!=" to compare - opI opID = 0 << 2 // integers after taking the absolute value - opN opID = 1 << 2 // full number (must be integer) - opF opID = 2 << 2 // fraction - opV opID = 3 << 2 // number of visible digits - opW opID = 4 << 2 // number of visible digits without trailing zeros - opBretonM opID = 5 << 2 // hard-wired rule for Breton - opItalian800 opID = 6 << 2 // hard-wired rule for Italian - opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan -) -const ( - // Use this plural form to indicate the next rule needs to match as well. - // The last condition in the list will have the correct plural form. - andNext = 0x7 - formMask = 0x7 - - opShift = 3 - - // numN indicates the maximum integer, or maximum mod value, for which we - // have inclusion masks. - numN = 100 - // The common denominator of the modulo that is taken. - maxMod = 100 -) diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go deleted file mode 100644 index 0c36a052f6..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
- -package main - -import ( - "flag" - "fmt" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -func main() { - gen.Init() - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "compact") - - fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.writeCompactIndex() -} - -type builder struct { - w *gen.CodeWriter - data *cldr.CLDR - supp *cldr.SupplementalData -} - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatal(err) - } - b := builder{ - w: w, - data: data, - supp: data.Supplemental(), - } - return &b -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go deleted file mode 100644 index 136cefaf08..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "fmt" - "log" - "sort" - "strings" - - "golang.org/x/text/internal/language" -) - -// Compact indices: -// Note -va-X variants only apply to localization variants. -// BCP variants only ever apply to language. -// The only ambiguity between tags is with regions. - -func (b *builder) writeCompactIndex() { - // Collect all language tags for which we have any data in CLDR. - m := map[language.Tag]bool{} - for _, lang := range b.data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. - tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - - // TODO: plural rules are also defined for the deprecated tags: - // iw mo sh tl - // Consider removing these as compact tags. - - // Include locales for plural rules, which uses a different structure. 
- for _, plurals := range b.supp.Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var coreTags []language.CompactCoreInfo - var special []string - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - cci, ok := language.GetCompactCore(t) - if !ok { - log.Fatalf("Locale for non-basic language %q", t) - } - coreTags = append(coreTags, cci) - } else { - special = append(special, t.String()) - } - } - - w := b.w - - sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) - sort.Strings(special) - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(m)) - - fmt.Fprintln(w, "const (") - for i, t := range coreTags { - fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) - } - for i, t := range special { - fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) - } - fmt.Fprintln(w, ")") - - w.WriteVar("coreTags", coreTags) - - w.WriteConst("specialTagsStr", strings.Join(special, " ")) -} - -func ident(s string) string { - return strings.Replace(s, "-", "", -1) + "Index" -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go deleted file mode 100644 index 9543d58323..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/internal/language/compact" - "golang.org/x/text/unicode/cldr" -) - -func main() { - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("parents.go", "compact") - - // Create parents table. - type ID uint16 - parents := make([]ID, compact.NumCompactTags) - for _, loc := range data.Locales() { - tag := language.MustParse(loc) - index, ok := compact.FromTag(tag) - if !ok { - continue - } - parentIndex := compact.ID(0) // und - for p := tag.Parent(); p != language.Und; p = p.Parent() { - if x, ok := compact.FromTag(p); ok { - parentIndex = x - break - } - } - parents[index] = ID(parentIndex) - } - - w.WriteComment(` - parents maps a compact index of a tag to the compact index of the parent of - this tag.`) - w.WriteVar("parents", parents) -} diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go deleted file mode 100644 index cdcc7febcb..0000000000 --- a/vendor/golang.org/x/text/internal/language/gen.go +++ /dev/null @@ -1,1520 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
- -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -AliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. 
If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type FromTo struct { - From, To uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []FromTo{} - for _, s := range ss.s { - m = append(m, FromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("NumScripts", len(b.script.slice())) - b.writeConst("NumRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -// TODO: region inclusion data will probably not be use used in future matchers. - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 64 { - log.Fatalf("only 64 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. 
-func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) - b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]AliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = Macro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = Legacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = Macro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = Deprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - lang.updateLater("bh", "bih") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonicalized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANA registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) - types := make([]AliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("AliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssigned - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db") - defer r.Close() - - buf, err := ioutil.ReadAll(r) - failOnError(err) - re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) - for _, m := range re.FindAllSubmatch(buf, -1) { - i := b.region.index(strings.ToUpper(string(m[1]))) - regionTypes[i] |= ccTLD - } - - b.writeSlice("regionTypes", regionTypes) - - iso3Set := make(map[string]int) - update := func(iso2, iso3 string) { - i := regionISO.index(iso2) - if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { - regionISO.s[i] += iso3[1:] - iso3Set[iso3] = -1 - } else { - if ok && j >= 0 { - regionISO.s[i] += string([]byte{0, byte(j)}) - } else { - iso3Set[iso3] = len(altRegionISO3) - regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) - altRegionISO3 += iso3 - altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - i := regionISO.index(tc.Type) + isoOffset - if d := m49map[i]; d != 0 { - log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) - } - m49 := parseM49(tc.Numeric) - m49map[i] = m49 - if r := fromM49map[m49]; r == 0 { - fromM49map[m49] = i - } else if r != i { - dep := b.registry[regionISO.s[r-isoOffset]].deprecated - if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { - fromM49map[m49] = i - } - } - } - for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { - if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { - from := parseM49(ta.Type) - if r := fromM49map[from]; r == 0 { - fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - if len(tc.Alpha3) == 3 { - update(tc.Type, tc.Alpha3) - } - } - // This entries are not included in territoryCodes. Mostly 3-letter variants - // of deleted codes and an entry for QU. - for _, m := range []struct{ iso2, iso3 string }{ - {"CT", "CTE"}, - {"DY", "DHY"}, - {"HV", "HVO"}, - {"JT", "JTN"}, - {"MI", "MID"}, - {"NH", "NHB"}, - {"NQ", "ATN"}, - {"PC", "PCI"}, - {"PU", "PUS"}, - {"PZ", "PCZ"}, - {"RH", "RHO"}, - {"VD", "VDR"}, - {"WK", "WAK"}, - // These three-letter codes are used for others as well. - {"FQ", "ATF"}, - } { - update(m.iso2, m.iso3) - } - for i, s := range regionISO.s { - if len(s) != 4 { - regionISO.s[i] = s + " " - } - } - b.writeConst("regionISO", tag.Index(regionISO.join())) - b.writeConst("altRegionISO3", altRegionISO3) - b.writeSlice("altRegionIDs", altRegionIDs) - - // Create list of deprecated regions. - // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only - // Transitionally-reserved mapping not included. - regionOldMap := stringSet{} - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { - regionOldMap.add(reg.Type) - regionOldMap.updateLater(reg.Type, reg.Replacement) - i, _ := regionISO.find(reg.Type) - j, _ := regionISO.find(reg.Replacement) - if k := m49map[i+isoOffset]; k == 0 { - m49map[i+isoOffset] = m49map[j+isoOffset] - } - } - } - b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { - return uint16(b.region.index(s)) - }) - // 3-digit region lookup, groupings. 
- for i := 1; i < isoOffset; i++ { - m := parseM49(b.region.s[i]) - m49map[i] = m - fromM49map[m] = i - } - b.writeSlice("m49", m49map) - - const ( - searchBits = 7 - regionBits = 9 - ) - if len(m49map) >= 1< %d", len(m49map), 1<>searchBits] = int16(len(fromM49)) - } - b.writeSlice("m49Index", m49Index) - b.writeSlice("fromM49", fromM49) -} - -const ( - // TODO: put these lists in regionTypes as user data? Could be used for - // various optimizations and refinements and could be exposed in the API. - iso3166Except = "AC CP DG EA EU FX IC SU TA UK" - iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. - // DY and RH are actually not deleted, but indeterminately reserved. - iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" -) - -const ( - iso3166UserAssigned = 1 << iota - ccTLD - bcp47Region -) - -func find(list []string, s string) int { - for i, t := range list { - if t == s { - return i - } - } - return -1 -} - -// writeVariants generates per-variant information and creates a map from variant -// name to index value. We assign index values such that sorting multiple -// variants by index value will result in the correct order. -// There are two types of variants: specialized and general. Specialized variants -// are only applicable to certain language or language-script pairs. Generalized -// variants apply to any language. Generalized variants always sort after -// specialized variants. We will therefore always assign a higher index value -// to a generalized variant than any other variant. Generalized variants are -// sorted alphabetically among themselves. -// Specialized variants may also sort after other specialized variants. Such -// variants will be ordered after any of the variants they may follow. -// We assume that if a variant x is followed by a variant y, then for any prefix -// p of x, p-x is a prefix of y. This allows us to order tags based on the -// maximum of the length of any of its prefixes. -// TODO: it is possible to define a set of Prefix values on variants such that -// a total order cannot be defined to the point that this algorithm breaks. -// In other words, we cannot guarantee the same order of variants for the -// future using the same algorithm or for non-compliant combinations of -// variants. For this reason, consider using simple alphabetic sorting -// of variants and ignore Prefix restrictions altogether. -func (b *builder) writeVariant() { - generalized := stringSet{} - specialized := stringSet{} - specializedExtend := stringSet{} - // Collate the variants by type and check assumptions. - for _, v := range b.variant.slice() { - e := b.registry[v] - if len(e.prefix) == 0 { - generalized.add(v) - continue - } - c := strings.Split(e.prefix[0], "-") - hasScriptOrRegion := false - if len(c) > 1 { - _, hasScriptOrRegion = b.script.find(c[1]) - if !hasScriptOrRegion { - _, hasScriptOrRegion = b.region.find(c[1]) - - } - } - if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { - // Variant is preceded by a language. - specialized.add(v) - continue - } - // Variant is preceded by another variant. - specializedExtend.add(v) - prefix := c[0] + "-" - if hasScriptOrRegion { - prefix += c[1] - } - for _, p := range e.prefix { - // Verify that the prefix minus the last element is a prefix of the - // predecessor element. 
- i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint64, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint64]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint64(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint64(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint64, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint64(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1< 0xBF { // 1011 1111: highest continuation byte - log.Fatalf("Last byte of zero value overflows for %s", ns.Id) - } - - i := rune(0) - for _, r := range ns.Digits { - // Verify that we can do simple math on the UTF-8 byte sequence - // of zero to get the digit. - if zero+i != r { - // Runes not consecutive. - log.Fatalf("Digit %d of %s (%U) is not offset correctly from zero value", i, ns.Id, r) - } - i++ - } - var x [utf8.UTFMax]byte - utf8.EncodeRune(x[:], zero) - id := system(len(numSysData)) - systemMap[ns.Id] = id - numSysData = append(numSysData, systemData{ - id: id, - digitSize: byte(sz), - zero: x, - }) - } - w.WriteVar("numSysData", numSysData) - - algoID := system(len(numSysData)) - fmt.Fprintln(w, "const (") - for _, ns := range data.Supplemental().NumberingSystems.NumberingSystem { - id, ok := systemMap[ns.Id] - if !ok { - id = algoID - systemMap[ns.Id] = id - algoID++ - } - fmt.Fprintf(w, "num%s = %#x\n", strings.Title(ns.Id), id) - } - fmt.Fprintln(w, "numNumberSystems") - fmt.Fprintln(w, ")") - - fmt.Fprintln(w, "var systemMap = map[string]system{") - for _, ns := range data.Supplemental().NumberingSystems.NumberingSystem { - fmt.Fprintf(w, "%q: num%s,\n", ns.Id, strings.Title(ns.Id)) - w.Size += len(ns.Id) + 16 + 1 // very coarse approximation - } - fmt.Fprintln(w, "}") -} - -func genSymbols(w *gen.CodeWriter, data *cldr.CLDR) { - d, err := cldr.ParseDraft(*draft) - if err != nil { - log.Fatalf("invalid draft level: %v", err) - } - - nNumberSystems := system(len(systemMap)) - - type symbols [NumSymbolTypes]string - - type key struct { - tag compact.ID - system system - } - symbolMap := map[key]*symbols{} - - defaults := map[compact.ID]system{} - - for _, lang := range data.Locales() { - ldml := data.RawLDML(lang) - if ldml.Numbers == nil { - continue - } - langIndex, ok := compact.FromTag(language.MustParse(lang)) - if !ok { - log.Fatalf("No compact index for language %s", lang) - } - if d := ldml.Numbers.DefaultNumberingSystem; len(d) > 0 { - defaults[langIndex] = getNumberSystem(d[0].Data()) - } - - syms := cldr.MakeSlice(&ldml.Numbers.Symbols) - syms.SelectDraft(d) - - getFirst := func(name string, x interface{}) string { - v := reflect.ValueOf(x) - slice := cldr.MakeSlice(x) - slice.SelectAnyOf("alt", "", "alt") - if reflect.Indirect(v).Len() == 0 { - return "" - } else if reflect.Indirect(v).Len() > 1 { - log.Fatalf("%s: multiple values of %q within single symbol not supported.", lang, name) - } - return reflect.Indirect(v).Index(0).MethodByName("Data").Call(nil)[0].String() - } - - for _, sym := range ldml.Numbers.Symbols { - if sym.NumberSystem == "" { - // This is just linking the default of root to "latn". 
- continue - } - symbolMap[key{langIndex, getNumberSystem(sym.NumberSystem)}] = &symbols{ - SymDecimal: getFirst("decimal", &sym.Decimal), - SymGroup: getFirst("group", &sym.Group), - SymList: getFirst("list", &sym.List), - SymPercentSign: getFirst("percentSign", &sym.PercentSign), - SymPlusSign: getFirst("plusSign", &sym.PlusSign), - SymMinusSign: getFirst("minusSign", &sym.MinusSign), - SymExponential: getFirst("exponential", &sym.Exponential), - SymSuperscriptingExponent: getFirst("superscriptingExponent", &sym.SuperscriptingExponent), - SymPerMille: getFirst("perMille", &sym.PerMille), - SymInfinity: getFirst("infinity", &sym.Infinity), - SymNan: getFirst("nan", &sym.Nan), - SymTimeSeparator: getFirst("timeSeparator", &sym.TimeSeparator), - } - } - } - - // Expand all values. - for k, syms := range symbolMap { - for t := SymDecimal; t < NumSymbolTypes; t++ { - p := k.tag - for syms[t] == "" { - p = p.Parent() - if pSyms, ok := symbolMap[key{p, k.system}]; ok && (*pSyms)[t] != "" { - syms[t] = (*pSyms)[t] - break - } - if p == 0 /* und */ { - // Default to root, latn. - syms[t] = (*symbolMap[key{}])[t] - } - } - } - } - - // Unique the symbol sets and write the string data. - m := map[symbols]int{} - sb := stringset.NewBuilder() - - symIndex := [][NumSymbolTypes]byte{} - - for ns := system(0); ns < nNumberSystems; ns++ { - for _, l := range data.Locales() { - langIndex, _ := compact.FromTag(language.MustParse(l)) - s := symbolMap[key{langIndex, ns}] - if s == nil { - continue - } - if _, ok := m[*s]; !ok { - m[*s] = len(symIndex) - sb.Add(s[:]...) - var x [NumSymbolTypes]byte - for i := SymDecimal; i < NumSymbolTypes; i++ { - x[i] = byte(sb.Index((*s)[i])) - } - symIndex = append(symIndex, x) - } - } - } - w.WriteVar("symIndex", symIndex) - w.WriteVar("symData", sb.Set()) - - // resolveSymbolIndex gets the index from the closest matching locale, - // including the locale itself. - resolveSymbolIndex := func(langIndex compact.ID, ns system) symOffset { - for { - if sym := symbolMap[key{langIndex, ns}]; sym != nil { - return symOffset(m[*sym]) - } - if langIndex == 0 { - return 0 // und, latn - } - langIndex = langIndex.Parent() - } - } - - // Create an index with the symbols for each locale for the latn numbering - // system. If this is not the default, or the only one, for a locale, we - // will overwrite the value later. - var langToDefaults [compact.NumCompactTags]symOffset - for _, l := range data.Locales() { - langIndex, _ := compact.FromTag(language.MustParse(l)) - langToDefaults[langIndex] = resolveSymbolIndex(langIndex, 0) - } - - // Delete redundant entries. - for _, l := range data.Locales() { - langIndex, _ := compact.FromTag(language.MustParse(l)) - def := defaults[langIndex] - syms := symbolMap[key{langIndex, def}] - if syms == nil { - continue - } - for ns := system(0); ns < nNumberSystems; ns++ { - if ns == def { - continue - } - if altSyms, ok := symbolMap[key{langIndex, ns}]; ok && *altSyms == *syms { - delete(symbolMap, key{langIndex, ns}) - } - } - } - - // Create a sorted list of alternatives per language. This will only need to - // be referenced if a user specified an alternative numbering system. - var langToAlt []altSymData - for _, l := range data.Locales() { - langIndex, _ := compact.FromTag(language.MustParse(l)) - start := len(langToAlt) - if start >= hasNonLatnMask { - log.Fatalf("Number of alternative assignments >= %x", hasNonLatnMask) - } - // Create the entry for the default value. 
- def := defaults[langIndex] - langToAlt = append(langToAlt, altSymData{ - compactTag: langIndex, - system: def, - symIndex: resolveSymbolIndex(langIndex, def), - }) - - for ns := system(0); ns < nNumberSystems; ns++ { - if def == ns { - continue - } - if sym := symbolMap[key{langIndex, ns}]; sym != nil { - langToAlt = append(langToAlt, altSymData{ - compactTag: langIndex, - system: ns, - symIndex: resolveSymbolIndex(langIndex, ns), - }) - } - } - if def == 0 && len(langToAlt) == start+1 { - // No additional data: erase the entry. - langToAlt = langToAlt[:start] - } else { - // Overwrite the entry in langToDefaults. - langToDefaults[langIndex] = hasNonLatnMask | symOffset(start) - } - } - w.WriteComment(` -langToDefaults maps a compact language index to the default numbering system -and default symbol set`) - w.WriteVar("langToDefaults", langToDefaults) - - w.WriteComment(` -langToAlt is a list of numbering system and symbol set pairs, sorted and -marked by compact language index.`) - w.WriteVar("langToAlt", langToAlt) -} - -// genFormats generates the lookup table for decimal, scientific and percent -// patterns. -// -// CLDR allows for patterns to be different per language for different numbering -// systems. In practice the patterns are set to be consistent for a language -// independent of the numbering system. genFormats verifies that no language -// deviates from this. -func genFormats(w *gen.CodeWriter, data *cldr.CLDR) { - d, err := cldr.ParseDraft(*draft) - if err != nil { - log.Fatalf("invalid draft level: %v", err) - } - - // Fill the first slot with a dummy so we can identify unspecified tags. - formats := []number.Pattern{{}} - patterns := map[string]int{} - - // TODO: It would be possible to eliminate two of these slices by having - // another indirection and store a reference to the combination of patterns. 
- decimal := make([]byte, compact.NumCompactTags) - scientific := make([]byte, compact.NumCompactTags) - percent := make([]byte, compact.NumCompactTags) - - for _, lang := range data.Locales() { - ldml := data.RawLDML(lang) - if ldml.Numbers == nil { - continue - } - langIndex, ok := compact.FromTag(language.MustParse(lang)) - if !ok { - log.Fatalf("No compact index for language %s", lang) - } - type patternSlice []*struct { - cldr.Common - Numbers string `xml:"numbers,attr"` - Count string `xml:"count,attr"` - } - - add := func(name string, tags []byte, ps patternSlice) { - sl := cldr.MakeSlice(&ps) - sl.SelectDraft(d) - if len(ps) == 0 { - return - } - if len(ps) > 2 || len(ps) == 2 && ps[0] != ps[1] { - log.Fatalf("Inconsistent %d patterns for language %s", name, lang) - } - s := ps[0].Data() - - index, ok := patterns[s] - if !ok { - nf, err := number.ParsePattern(s) - if err != nil { - log.Fatal(err) - } - index = len(formats) - patterns[s] = index - formats = append(formats, *nf) - } - tags[langIndex] = byte(index) - } - - for _, df := range ldml.Numbers.DecimalFormats { - for _, l := range df.DecimalFormatLength { - if l.Type != "" { - continue - } - for _, f := range l.DecimalFormat { - add("decimal", decimal, f.Pattern) - } - } - } - for _, df := range ldml.Numbers.ScientificFormats { - for _, l := range df.ScientificFormatLength { - if l.Type != "" { - continue - } - for _, f := range l.ScientificFormat { - add("scientific", scientific, f.Pattern) - } - } - } - for _, df := range ldml.Numbers.PercentFormats { - for _, l := range df.PercentFormatLength { - if l.Type != "" { - continue - } - for _, f := range l.PercentFormat { - add("percent", percent, f.Pattern) - } - } - } - } - - // Complete the parent tag array to reflect inheritance. An index of 0 - // indicates an unspecified value. - for _, data := range [][]byte{decimal, scientific, percent} { - for i := range data { - p := compact.ID(i) - for ; data[p] == 0; p = p.Parent() { - } - data[i] = data[p] - } - } - w.WriteVar("tagToDecimal", decimal) - w.WriteVar("tagToScientific", scientific) - w.WriteVar("tagToPercent", percent) - - value := strings.Replace(fmt.Sprintf("%#v", formats), "number.", "", -1) - // Break up the lines. This won't give ideal perfect formatting, but it is - // better than one huge line. - value = strings.Replace(value, ", ", ",\n", -1) - fmt.Fprintf(w, "var formats = %s\n", value) -} diff --git a/vendor/golang.org/x/text/internal/number/gen_common.go b/vendor/golang.org/x/text/internal/number/gen_common.go deleted file mode 100644 index b1b41a73a9..0000000000 --- a/vendor/golang.org/x/text/internal/number/gen_common.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode/utf8" - - "golang.org/x/text/internal/language/compact" -) - -// A system identifies a CLDR numbering system. -type system byte - -type systemData struct { - id system - digitSize byte // number of UTF-8 bytes per digit - zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit. -} - -// A SymbolType identifies a symbol of a specific kind. 
-type SymbolType int - -const ( - SymDecimal SymbolType = iota - SymGroup - SymList - SymPercentSign - SymPlusSign - SymMinusSign - SymExponential - SymSuperscriptingExponent - SymPerMille - SymInfinity - SymNan - SymTimeSeparator - - NumSymbolTypes -) - -const hasNonLatnMask = 0x8000 - -// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask -// is not 0 (with this bit masked out), and an offset into symIndex otherwise. -// -// TODO: this type can be a byte again if we use an indirection into altsymData -// and introduce an alt -> offset slice (the length of this will be number of -// alternatives plus 1). This also allows getting rid of the compactTag field -// in altSymData. In total this will save about 1K. -type symOffset uint16 - -type altSymData struct { - compactTag compact.ID - symIndex symOffset - system system -} diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go deleted file mode 100644 index 3004eb42c1..0000000000 --- a/vendor/golang.org/x/text/language/gen.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. - -package main - -import ( - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -func main() { - gen.Init() - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "language") - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.writeConstants() - b.writeMatchData() -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData -} - -func (b *builder) langIndex(s string) uint16 { - return uint16(language.MustParseBase(s)) -} - -func (b *builder) regionIndex(s string) int { - return int(language.MustParseRegion(s)) -} - -func (b *builder) scriptIndex(s string) int { - return int(language.MustParseScript(s)) -} - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatal(err) - } - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - return &b -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - fmt.Fprintln(b.w, "const (") - for _, v := range values { - fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v)) - } - fmt.Fprintln(b.w, ")") -} - -// TODO: region inclusion data will probably not be use used in future matchers. - -var langConsts = []string{ - "de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und", -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. 
-} - -func (b *builder) writeConstants() { - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) - b.writeConsts(b.regionIndex, regionConsts...) - b.writeConsts(b.scriptIndex, scriptConsts...) -} - -type mutualIntelligibility struct { - want, have uint16 - distance uint8 - oneway bool -} - -type scriptIntelligibility struct { - wantLang, haveLang uint16 - wantScript, haveScript uint8 - distance uint8 - // Always oneway -} - -type regionIntelligibility struct { - lang uint16 // compact language id - script uint8 // 0 means any - group uint8 // 0 means any; if bit 7 is set it means inverse - distance uint8 - // Always twoway. -} - -// writeMatchData writes tables with languages and scripts for which there is -// mutual intelligibility. The data is based on CLDR's languageMatching data. -// Note that we use a different algorithm than the one defined by CLDR and that -// we slightly modify the data. For example, we convert scores to confidence levels. -// We also drop all region-related data as we use a different algorithm to -// determine region equivalence. -func (b *builder) writeMatchData() { - lm := b.supp.LanguageMatching.LanguageMatches - cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new") - - regionHierarchy := map[string][]string{} - for _, g := range b.supp.TerritoryContainment.Group { - regions := strings.Split(g.Contains, " ") - regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...) - } - regionToGroups := make([]uint8, language.NumRegions) - - idToIndex := map[string]uint8{} - for i, mv := range lm[0].MatchVariable { - if i > 6 { - log.Fatalf("Too many groups: %d", i) - } - idToIndex[mv.Id] = uint8(i + 1) - // TODO: also handle '-' - for _, r := range strings.Split(mv.Value, "+") { - todo := []string{r} - for k := 0; k < len(todo); k++ { - r := todo[k] - regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) - todo = append(todo, regionHierarchy[r]...) - } - } - } - b.w.WriteVar("regionToGroups", regionToGroups) - - // maps language id to in- and out-of-group region. - paradigmLocales := [][3]uint16{} - locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") - for i := 0; i < len(locales); i += 2 { - x := [3]uint16{} - for j := 0; j < 2; j++ { - pc := strings.SplitN(locales[i+j], "-", 2) - x[0] = b.langIndex(pc[0]) - if len(pc) == 2 { - x[1+j] = uint16(b.regionIndex(pc[1])) - } - } - paradigmLocales = append(paradigmLocales, x) - } - b.w.WriteVar("paradigmLocales", paradigmLocales) - - b.w.WriteType(mutualIntelligibility{}) - b.w.WriteType(scriptIntelligibility{}) - b.w.WriteType(regionIntelligibility{}) - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - matchRegion := []regionIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - continue - } - distance, _ := strconv.ParseInt(m.Distance, 10, 8) - switch len(d) { - case 2: - if desired == supported && desired == "*_*" { - continue - } - // language-script pair. 
- matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(d[0])), - haveLang: uint16(b.langIndex(s[0])), - wantScript: uint8(b.scriptIndex(d[1])), - haveScript: uint8(b.scriptIndex(s[1])), - distance: uint8(distance), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(s[0])), - haveLang: uint16(b.langIndex(d[0])), - wantScript: uint8(b.scriptIndex(s[1])), - haveScript: uint8(b.scriptIndex(d[1])), - distance: uint8(distance), - }) - } - case 1: - if desired == supported && desired == "*" { - continue - } - if distance == 1 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - // TODO: consider dropping oneway field and just doubling the entry. - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - distance: uint8(distance), - oneway: m.Oneway == "true", - }) - case 3: - if desired == supported && desired == "*_*_*" { - continue - } - if desired != supported { - // This is now supported by CLDR, but only one case, which - // should already be covered by paradigm locales. For instance, - // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in - // testdata/CLDRLocaleMatcherTest.txt tests this. - if supported != "en_*_GB" { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - continue - } - ri := regionIntelligibility{ - lang: b.langIndex(d[0]), - distance: uint8(distance), - } - if d[1] != "*" { - ri.script = uint8(b.scriptIndex(d[1])) - } - switch { - case d[2] == "*": - ri.group = 0x80 // not contained in anything - case strings.HasPrefix(d[2], "$!"): - ri.group = 0x80 - d[2] = "$" + d[2][len("$!"):] - fallthrough - case strings.HasPrefix(d[2], "$"): - ri.group |= idToIndex[d[2]] - } - matchRegion = append(matchRegion, ri) - default: - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - } - sort.SliceStable(matchLang, func(i, j int) bool { - return matchLang[i].distance < matchLang[j].distance - }) - b.w.WriteComment(` - matchLang holds pairs of langIDs of base languages that are typically - mutually intelligible. Each pair is associated with a confidence and - whether the intelligibility goes one or both ways.`) - b.w.WriteVar("matchLang", matchLang) - - b.w.WriteComment(` - matchScript holds pairs of scriptIDs where readers of one script - can typically also read the other. Each is associated with a confidence.`) - sort.SliceStable(matchScript, func(i, j int) bool { - return matchScript[i].distance < matchScript[j].distance - }) - b.w.WriteVar("matchScript", matchScript) - - sort.SliceStable(matchRegion, func(i, j int) bool { - return matchRegion[i].distance < matchRegion[j].distance - }) - b.w.WriteVar("matchRegion", matchRegion) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169cc..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. - - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d6..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb9942894..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa9334..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. 
-type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. - decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... 
-// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. 
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900d..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8b6fcab8a9..bbbcd300c7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,12 +1,12 @@ # cloud.google.com/go v0.44.1 -cloud.google.com/go/storage -cloud.google.com/go/trace/apiv1 +cloud.google.com/go/compute/metadata cloud.google.com/go/iam cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -cloud.google.com/go/compute/metadata +cloud.google.com/go/storage +cloud.google.com/go/trace/apiv1 # contrib.go.opencensus.io/exporter/ocagent v0.6.0 contrib.go.opencensus.io/exporter/ocagent # github.com/Azure/azure-pipeline-go v0.2.1 @@ -24,8 +24,8 @@ github.com/Azure/go-autorest/autorest/azure github.com/Azure/go-autorest/autorest/date github.com/Azure/go-autorest/autorest/to github.com/Azure/go-autorest/autorest/validation -github.com/Azure/go-autorest/tracing github.com/Azure/go-autorest/logger +github.com/Azure/go-autorest/tracing # github.com/NYTimes/gziphandler v1.1.1 github.com/NYTimes/gziphandler # github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 @@ -40,39 +40,39 @@ github.com/armon/go-metrics/prometheus github.com/armon/go-radix # github.com/aws/aws-sdk-go v1.23.12 
github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/service/ec2 github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/service/sts -github.com/aws/aws-sdk-go/service/sts/stsiface github.com/aws/aws-sdk-go/aws/client/metadata github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/ec2query +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/private/protocol/rest -github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/service/ec2 +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/census-instrumentation/opencensus-proto v0.2.1 @@ -92,12 +92,12 @@ github.com/dgrijalva/jwt-go github.com/edsrzf/mmap-go # github.com/elastic/go-sysinfo v1.0.1 github.com/elastic/go-sysinfo -github.com/elastic/go-sysinfo/types github.com/elastic/go-sysinfo/internal/registry github.com/elastic/go-sysinfo/providers/darwin github.com/elastic/go-sysinfo/providers/linux -github.com/elastic/go-sysinfo/providers/windows github.com/elastic/go-sysinfo/providers/shared +github.com/elastic/go-sysinfo/providers/windows +github.com/elastic/go-sysinfo/types # github.com/elastic/go-windows v1.0.0 github.com/elastic/go-windows # github.com/fatih/structtag v1.0.0 @@ -112,25 +112,25 @@ github.com/go-kit/kit/log/level # github.com/go-logfmt/logfmt v0.4.0 github.com/go-logfmt/logfmt # github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 -github.com/gogo/protobuf/proto github.com/gogo/protobuf/gogoproto -github.com/gogo/protobuf/types +github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor 
github.com/gogo/protobuf/sortkeys +github.com/gogo/protobuf/types # github.com/golang/protobuf v1.3.2 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/protoc-gen-go/generator +github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/plugin github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/jsonpb -github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/ptypes/wrappers -github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/struct -github.com/golang/protobuf/protoc-gen-go/generator/internal/remap -github.com/golang/protobuf/protoc-gen-go/plugin +github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 github.com/golang/snappy # github.com/google/go-cmp v0.3.1 @@ -154,32 +154,32 @@ github.com/googleapis/gnostic/extensions # github.com/gophercloud/gophercloud v0.3.0 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects -github.com/gophercloud/gophercloud/pagination -github.com/gophercloud/gophercloud/openstack/identity/v2/tokens -github.com/gophercloud/gophercloud/openstack/identity/v3/tokens -github.com/gophercloud/gophercloud/openstack/utils -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts -github.com/gophercloud/gophercloud/openstack/identity/v2/tenants github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors -github.com/gophercloud/gophercloud/openstack/compute/v2/servers github.com/gophercloud/gophercloud/openstack/compute/v2/flavors github.com/gophercloud/gophercloud/openstack/compute/v2/images +github.com/gophercloud/gophercloud/openstack/compute/v2/servers +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/openstack/identity/v2/tokens +github.com/gophercloud/gophercloud/openstack/identity/v3/tokens +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers +github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects +github.com/gophercloud/gophercloud/openstack/utils +github.com/gophercloud/gophercloud/pagination # github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/grpc-ecosystem/go-grpc-middleware github.com/grpc-ecosystem/go-grpc-middleware/recovery -github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing github.com/grpc-ecosystem/go-grpc-middleware/tags +github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing github.com/grpc-ecosystem/go-grpc-middleware/util/metautils # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus # github.com/grpc-ecosystem/grpc-gateway v1.9.5 +github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime 
github.com/grpc-ecosystem/grpc-gateway/utilities -github.com/grpc-ecosystem/grpc-gateway/internal # github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/consul/api # github.com/hashicorp/go-cleanhttp v0.5.1 @@ -189,8 +189,8 @@ github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-rootcerts v1.0.1 github.com/hashicorp/go-rootcerts # github.com/hashicorp/golang-lru v0.5.3 -github.com/hashicorp/golang-lru/simplelru github.com/hashicorp/golang-lru +github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/serf v0.8.3 github.com/hashicorp/serf/coordinate # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af @@ -259,67 +259,67 @@ github.com/opentracing/opentracing-go/log github.com/pkg/errors # github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/internal -github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/promauto +github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/testutil # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go # github.com/prometheus/common v0.6.0 +github.com/prometheus/common/config +github.com/prometheus/common/expfmt +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model github.com/prometheus/common/route github.com/prometheus/common/version -github.com/prometheus/common/expfmt -github.com/prometheus/common/config -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs # github.com/prometheus/prometheus v1.8.2-0.20190913102521-8ab628b35467 +github.com/prometheus/prometheus/config +github.com/prometheus/prometheus/discovery/azure +github.com/prometheus/prometheus/discovery/config +github.com/prometheus/prometheus/discovery/consul +github.com/prometheus/prometheus/discovery/dns +github.com/prometheus/prometheus/discovery/ec2 github.com/prometheus/prometheus/discovery/file +github.com/prometheus/prometheus/discovery/gce +github.com/prometheus/prometheus/discovery/kubernetes +github.com/prometheus/prometheus/discovery/marathon +github.com/prometheus/prometheus/discovery/openstack +github.com/prometheus/prometheus/discovery/refresh github.com/prometheus/prometheus/discovery/targetgroup +github.com/prometheus/prometheus/discovery/triton +github.com/prometheus/prometheus/discovery/zookeeper +github.com/prometheus/prometheus/pkg/gate github.com/prometheus/prometheus/pkg/labels +github.com/prometheus/prometheus/pkg/logging github.com/prometheus/prometheus/pkg/relabel github.com/prometheus/prometheus/pkg/rulefmt +github.com/prometheus/prometheus/pkg/textparse +github.com/prometheus/prometheus/pkg/timestamp +github.com/prometheus/prometheus/pkg/value +github.com/prometheus/prometheus/prompb github.com/prometheus/prometheus/promql github.com/prometheus/prometheus/rules +github.com/prometheus/prometheus/storage +github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/storage/tsdb +github.com/prometheus/prometheus/template github.com/prometheus/prometheus/tsdb github.com/prometheus/prometheus/tsdb/chunkenc -github.com/prometheus/prometheus/tsdb/errors -github.com/prometheus/prometheus/tsdb/labels -github.com/prometheus/prometheus/util/strutil 
github.com/prometheus/prometheus/tsdb/chunks +github.com/prometheus/prometheus/tsdb/encoding +github.com/prometheus/prometheus/tsdb/errors github.com/prometheus/prometheus/tsdb/fileutil +github.com/prometheus/prometheus/tsdb/goversion github.com/prometheus/prometheus/tsdb/index -github.com/prometheus/prometheus/pkg/value -github.com/prometheus/prometheus/pkg/timestamp -github.com/prometheus/prometheus/pkg/textparse -github.com/prometheus/prometheus/storage -github.com/prometheus/prometheus/prompb -github.com/prometheus/prometheus/pkg/gate -github.com/prometheus/prometheus/storage/remote -github.com/prometheus/prometheus/template +github.com/prometheus/prometheus/tsdb/labels +github.com/prometheus/prometheus/tsdb/wal github.com/prometheus/prometheus/util/stats +github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/teststorage github.com/prometheus/prometheus/util/testutil -github.com/prometheus/prometheus/tsdb/encoding -github.com/prometheus/prometheus/tsdb/goversion -github.com/prometheus/prometheus/tsdb/wal -github.com/prometheus/prometheus/config -github.com/prometheus/prometheus/pkg/logging -github.com/prometheus/prometheus/discovery/config -github.com/prometheus/prometheus/discovery/azure -github.com/prometheus/prometheus/discovery/consul -github.com/prometheus/prometheus/discovery/dns -github.com/prometheus/prometheus/discovery/ec2 -github.com/prometheus/prometheus/discovery/gce -github.com/prometheus/prometheus/discovery/kubernetes -github.com/prometheus/prometheus/discovery/marathon -github.com/prometheus/prometheus/discovery/openstack -github.com/prometheus/prometheus/discovery/triton -github.com/prometheus/prometheus/discovery/zookeeper -github.com/prometheus/prometheus/discovery/refresh github.com/prometheus/prometheus/util/treecache # github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 github.com/samuel/go-zookeeper/zk @@ -333,23 +333,23 @@ github.com/santhosh-tekuri/jsonschema/mediatypes github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage +github.com/uber/jaeger-client-go/internal/baggage/remote github.com/uber/jaeger-client-go/internal/spanlog github.com/uber/jaeger-client-go/internal/throttler +github.com/uber/jaeger-client-go/internal/throttler/remote github.com/uber/jaeger-client-go/log +github.com/uber/jaeger-client-go/rpcmetrics github.com/uber/jaeger-client-go/thrift +github.com/uber/jaeger-client-go/thrift-gen/agent +github.com/uber/jaeger-client-go/thrift-gen/baggage github.com/uber/jaeger-client-go/thrift-gen/jaeger github.com/uber/jaeger-client-go/thrift-gen/sampling github.com/uber/jaeger-client-go/thrift-gen/zipkincore -github.com/uber/jaeger-client-go/utils -github.com/uber/jaeger-client-go/internal/baggage/remote -github.com/uber/jaeger-client-go/internal/throttler/remote -github.com/uber/jaeger-client-go/rpcmetrics github.com/uber/jaeger-client-go/transport -github.com/uber/jaeger-client-go/thrift-gen/agent -github.com/uber/jaeger-client-go/thrift-gen/baggage +github.com/uber/jaeger-client-go/utils # github.com/uber/jaeger-lib v2.2.0+incompatible -github.com/uber/jaeger-lib/metrics/prometheus github.com/uber/jaeger-lib/metrics +github.com/uber/jaeger-lib/metrics/prometheus # go.elastic.co/apm v1.5.0 go.elastic.co/apm go.elastic.co/apm/apmconfig @@ -359,6 +359,7 @@ go.elastic.co/apm/internal/apmhttputil go.elastic.co/apm/internal/apmlog go.elastic.co/apm/internal/apmschema go.elastic.co/apm/internal/apmstrings +go.elastic.co/apm/internal/apmversion 
go.elastic.co/apm/internal/configutil go.elastic.co/apm/internal/iochan go.elastic.co/apm/internal/pkgerrorsutil @@ -367,7 +368,6 @@ go.elastic.co/apm/internal/wildcard go.elastic.co/apm/model go.elastic.co/apm/stacktrace go.elastic.co/apm/transport -go.elastic.co/apm/internal/apmversion # go.elastic.co/apm/module/apmhttp v1.5.0 go.elastic.co/apm/module/apmhttp # go.elastic.co/apm/module/apmot v1.5.0 @@ -375,53 +375,53 @@ go.elastic.co/apm/module/apmot # go.elastic.co/fastjson v1.0.0 go.elastic.co/fastjson # go.opencensus.io v0.22.0 -go.opencensus.io/trace -go.opencensus.io/plugin/ochttp +go.opencensus.io go.opencensus.io/internal -go.opencensus.io/trace/internal -go.opencensus.io/trace/tracestate +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer +go.opencensus.io/plugin/ocgrpc +go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/plugin/ochttp/propagation/tracecontext +go.opencensus.io/resource go.opencensus.io/stats +go.opencensus.io/stats/internal go.opencensus.io/stats/view go.opencensus.io/tag +go.opencensus.io/trace +go.opencensus.io/trace/internal go.opencensus.io/trace/propagation -go.opencensus.io/plugin/ocgrpc -go.opencensus.io -go.opencensus.io/metric/metricdata -go.opencensus.io/stats/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricproducer -go.opencensus.io/resource -go.opencensus.io/plugin/ochttp/propagation/tracecontext +go.opencensus.io/trace/tracestate # go.uber.org/automaxprocs v1.2.0 -go.uber.org/automaxprocs/maxprocs -go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/internal/cgroups +go.uber.org/automaxprocs/internal/runtime +go.uber.org/automaxprocs/maxprocs # golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc -golang.org/x/crypto/ed25519 golang.org/x/crypto/argon2 -golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/blake2b +golang.org/x/crypto/ed25519 +golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20190923162816-aa69164e4478 -golang.org/x/net/trace -golang.org/x/net/ipv4 -golang.org/x/net/ipv6 +golang.org/x/net/bpf +golang.org/x/net/context +golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy golang.org/x/net/http2 -golang.org/x/net/publicsuffix -golang.org/x/net/context -golang.org/x/net/internal/timeseries golang.org/x/net/http2/hpack -golang.org/x/net/bpf +golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket -golang.org/x/net/context/ctxhttp -golang.org/x/net/idna -golang.org/x/net/http/httpproxy +golang.org/x/net/internal/timeseries +golang.org/x/net/ipv4 +golang.org/x/net/ipv6 +golang.org/x/net/publicsuffix +golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 -golang.org/x/oauth2/google golang.org/x/oauth2 +golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt @@ -429,74 +429,87 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe => golang.org/x/sys v0.0.0-20190412213103-97732733099d -golang.org/x/sys/windows -golang.org/x/sys/unix golang.org/x/sys/cpu +golang.org/x/sys/unix +golang.org/x/sys/windows golang.org/x/sys/windows/registry # golang.org/x/text v0.3.2 -golang.org/x/text/language -golang.org/x/text/message -golang.org/x/text/internal/language 
-golang.org/x/text/internal/language/compact golang.org/x/text/feature/plural +golang.org/x/text/internal +golang.org/x/text/internal/catmsg golang.org/x/text/internal/format +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact golang.org/x/text/internal/number -golang.org/x/text/message/catalog -golang.org/x/text/internal/tag -golang.org/x/text/internal/catmsg golang.org/x/text/internal/stringset -golang.org/x/text/internal +golang.org/x/text/internal/tag +golang.org/x/text/language +golang.org/x/text/message +golang.org/x/text/message/catalog golang.org/x/text/secure/bidirule +golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -golang.org/x/text/transform # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate # google.golang.org/api v0.11.0 +google.golang.org/api/compute/v1 +google.golang.org/api/googleapi +google.golang.org/api/googleapi/internal/uritemplates +google.golang.org/api/googleapi/transport +google.golang.org/api/internal +google.golang.org/api/internal/gensupport google.golang.org/api/iterator google.golang.org/api/option -google.golang.org/api/googleapi google.golang.org/api/storage/v1 -google.golang.org/api/transport/http -google.golang.org/api/internal -google.golang.org/api/transport google.golang.org/api/support/bundler -google.golang.org/api/googleapi/internal/uritemplates -google.golang.org/api/internal/gensupport -google.golang.org/api/googleapi/transport -google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/transport google.golang.org/api/transport/grpc -google.golang.org/api/compute/v1 +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation # google.golang.org/appengine v1.6.1 google.golang.org/appengine -google.golang.org/appengine/urlfetch google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/socket google.golang.org/appengine/internal/socket +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket +google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/devtools/cloudtrace/v1 -google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/iam/v1 -google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.22.1 google.golang.org/grpc -google.golang.org/grpc/codes -google.golang.org/grpc/credentials -google.golang.org/grpc/status google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb +google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes 
google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/alts +google.golang.org/grpc/credentials/alts/internal +google.golang.org/grpc/credentials/alts/internal/authinfo +google.golang.org/grpc/credentials/alts/internal/conn +google.golang.org/grpc/credentials/alts/internal/handshaker +google.golang.org/grpc/credentials/alts/internal/handshaker/service +google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp +google.golang.org/grpc/credentials/google +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog @@ -508,6 +521,7 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive google.golang.org/grpc/metadata @@ -518,22 +532,8 @@ google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats +google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/balancer/base -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall -google.golang.org/grpc/balancer/grpclb -google.golang.org/grpc/credentials/google -google.golang.org/grpc/credentials/oauth -google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 -google.golang.org/grpc/credentials/alts -google.golang.org/grpc/credentials/alts/internal -google.golang.org/grpc/credentials/alts/internal/handshaker -google.golang.org/grpc/credentials/alts/internal/handshaker/service -google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp -google.golang.org/grpc/credentials/alts/internal/authinfo -google.golang.org/grpc/credentials/alts/internal/conn # gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 # gopkg.in/fsnotify.v1 v1.4.7 @@ -549,18 +549,16 @@ gopkg.in/yaml.v2 # howett.net/plist v0.0.0-20181124034731-591f970eefbb howett.net/plist # k8s.io/api v0.0.0-20190813020757-36bff7324fb7 => k8s.io/api v0.0.0-20190620084959-7cf5895f2711 -k8s.io/api/core/v1 -k8s.io/api/extensions/v1beta1 -k8s.io/api/apps/v1beta1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 -k8s.io/api/autoscaling/v1 +k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 +k8s.io/api/autoscaling/v1 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 @@ -569,12 +567,14 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 -k8s.io/api/policy/v1beta1 +k8s.io/api/core/v1 k8s.io/api/events/v1beta1 +k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 +k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 @@ -586,52 +586,49 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 # k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 => k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/runtime 
-k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/api/meta -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/clock -k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/util/wait -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/util/validation/field -k8s.io/apimachinery/pkg/apis/meta/internalversion -k8s.io/apimachinery/pkg/runtime/serializer/json -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/recognizer -k8s.io/apimachinery/pkg/runtime/serializer/versioning -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab -k8s.io/client-go/kubernetes -k8s.io/client-go/rest -k8s.io/client-go/tools/cache -k8s.io/client-go/tools/metrics -k8s.io/client-go/util/workqueue k8s.io/client-go/discovery +k8s.io/client-go/kubernetes +k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -668,27 +665,30 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/util/flowcontrol +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version 
k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/rest k8s.io/client-go/rest/watch +k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/transport -k8s.io/client-go/util/cert +k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager -k8s.io/client-go/util/retry -k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/reference -k8s.io/client-go/pkg/apis/clientauthentication -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 -k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/transport +k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry +k8s.io/client-go/util/workqueue # k8s.io/klog v0.4.0 => k8s.io/klog v0.3.1 k8s.io/klog # k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a k8s.io/utils/buffer -k8s.io/utils/trace k8s.io/utils/integer +k8s.io/utils/trace # sigs.k8s.io/yaml v1.1.0 sigs.k8s.io/yaml