From e94c84c71fbb5a2435b8308adf4bb37e4bd55e4d Mon Sep 17 00:00:00 2001
From: Divanshu Chauhan <23524935+Divkix@users.noreply.github.com>
Date: Sun, 11 Dec 2022 02:05:54 -0700
Subject: [PATCH] remove vendor dir

---
 .gitignore                      |     21 +
 vendor/... (858 vendored files) | 360662 -
 859 files changed, 21 insertions(+), 360662 deletions(-)
 create mode 100644 .gitignore
 delete mode 100644 vendor/... (858 files under
  github.com/PaulSonOfLars/gotgbot/v2, github.com/golang/snappy,
  github.com/klauspost/compress, github.com/montanaflynn/stats,
  github.com/pkg/errors, github.com/sirupsen/logrus,
  github.com/xdg-go/pbkdf2, github.com/xdg-go/scram,
  github.com/xdg-go/stringprep, github.com/youmark/pkcs8,
  go.mongodb.org/mongo-driver, golang.org/x/crypto, golang.org/x/sync,
  golang.org/x/sys, golang.org/x/text, and vendor/modules.txt)
100644 vendor/go.mongodb.org/mongo-driver/mongo/index_view.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/mongo.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/serverapioptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/readpref/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/results.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/session.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/single_result.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/util.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go delete mode 100644 
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/version/version.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/constructor.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/auth.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/conversation.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/cred.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_supported.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/credentials.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/request.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rest.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbcr.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/util.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/list_collections_batch_cursor.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/errors_not_enabled.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/state.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/cache.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/errors.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation_exhaust.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/serverapioptions.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/cluster_clock.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/cancellation_listener.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_legacy.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/diff.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_16.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/tls_connection_source_1_17.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go
 delete mode 100644 vendor/golang.org/x/crypto/LICENSE
 delete mode 100644 vendor/golang.org/x/crypto/PATENTS
 delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go
 delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
 delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go
 delete mode 100644 vendor/golang.org/x/sync/LICENSE
 delete mode 100644 vendor/golang.org/x/sync/PATENTS
 delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
 delete mode 100644 vendor/golang.org/x/sys/LICENSE
 delete mode 100644 vendor/golang.org/x/sys/PATENTS
 delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
 delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore
 delete mode 100644 vendor/golang.org/x/sys/unix/README.md
 delete mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/aliases.go
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s
 delete mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/constants.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/dirent.go
 delete mode 100644 vendor/golang.org/x/sys/unix/endian_big.go
 delete mode 100644 vendor/golang.org/x/sys/unix/endian_little.go
 delete mode 100644 vendor/golang.org/x/sys/unix/env_unix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/fcntl.go
 delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
 delete mode 100644 vendor/golang.org/x/sys/unix/fdset.go
 delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/gccgo.go
 delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c
 delete mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ioctl.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/mkall.sh
 delete mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
 delete mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go
 delete mode 100644 vendor/golang.org/x/sys/unix/race.go
 delete mode 100644 vendor/golang.org/x/sys/unix/race0.go
 delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go
 delete mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go
 delete mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
 delete mode 100644 vendor/golang.org/x/sys/unix/timestruct.go
 delete mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/windows/aliases.go
 delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/empty.s
 delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go
 delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash
 delete mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash
 delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go
 delete mode 100644 vendor/golang.org/x/sys/windows/race.go
 delete mode 100644 vendor/golang.org/x/sys/windows/race0.go
 delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/service.go
 delete mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/str.go
 delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go
 delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go
 delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go
 delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go
 delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
 delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go
 delete mode 100644 vendor/golang.org/x/text/AUTHORS
 delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS
 delete mode 100644 vendor/golang.org/x/text/LICENSE
 delete mode 100644 vendor/golang.org/x/text/PATENTS
 delete mode 100644 vendor/golang.org/x/text/transform/transform.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/input.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go
 delete mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go
 delete mode 100644 vendor/modules.txt

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b8c2a57
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,21 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+vendor/
+
+# Go workspace file
+go.work
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/.gitignore b/vendor/github.com/PaulSonOfLars/gotgbot/v2/.gitignore
deleted file mode 100644
index f46854f..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/.gitignore
+++ /dev/null
@@ -1,16 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-.idea/
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/.golangci.yaml b/vendor/github.com/PaulSonOfLars/gotgbot/v2/.golangci.yaml
deleted file mode 100644
index 876e740..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/.golangci.yaml
+++ /dev/null
@@ -1,122 +0,0 @@
-run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 1m
-
-  # exit code when at least one issue was found, default is 1
-  issues-exit-code: 1
-
-  # include test files or not, default is true
-  tests: true
-
-# output configuration options
-output:
-  # colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions
-  # default is "colored-line-number"
-  format: colored-line-number
-
-  # print lines of code with issue, default is true
-  print-issued-lines: true
-
-  # print linter name in the end of issue text, default is true
-  print-linter-name: true
-
-  # make issues output unique by line, default is true
-  uniq-by-line: true
-
-  # sorts results by: filepath, line and column
-  sort-results: true
-
-linters:
-  disable-all: true
-  enable:
-    - deadcode
-    - errcheck
-    - gosimple
-    - govet
-    - ineffassign
-    - staticcheck
-    - structcheck
-    - typecheck
-    - unused
-    - varcheck
-    - asciicheck
-    - bodyclose
-    - bodyclose
-    - dupl
-    - durationcheck
-    - errorlint
-    - exhaustive
-    - exportloopref
-    - forbidigo
-    - forcetypeassert
-    - godot
-    - goerr113
-    - gofmt
-    - goimports
-    - gomoddirectives
-    - goprintffuncname
-    - gosec
-    # - ifshort
-    - importas
-    # - lll
-    - makezero
-    - misspell
-    - nakedret
-    # - nilassign
-    - nilerr
-    - noctx
-    # - nolintlint
-    # - paralleltest
-    - prealloc
-    - promlinter
-    - tagliatelle
-    - unconvert
-    - unparam
-    - wastedassign
-issues:
-  # Excluding configuration per-path, per-linter, per-text and per-source
-  exclude-rules:
-    # Don't run forbidigo on samples, since prints are OK
-    - path: samples/
-      linters:
-        - forbidigo
-    # Don't run error or allocation optimisations on scripts, since they're not necessary to generate code
-    - path: scripts/generate/
-      linters:
-        - prealloc
-        - goerr113
-    # Exclude some `staticcheck` messages.
-    - linters:
-        - staticcheck
-      text: "SA1019:" # warning about "strings.Title" being deprecated for unicode issue; we dont need unicode here.
-
-  # Enable default excludes, for common sense values.
-  exclude-use-default: true
-
-  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
-  max-same-issues: 0
-
-severity:
-  # Default value is empty string.
-  # Set the default severity for issues. If severity rules are defined and the issues
-  # do not match or no severity is provided to the rule this will be the default
-  # severity applied. Severities should match the supported severity names of the
-  # selected out format.
-  # - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity
-  # - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity
-  # - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
-  default-severity: error
-
-  # The default value is false.
-  # If set to true severity-rules regular expressions become case sensitive.
-  case-sensitive: false
-
-linters-settings:
-  tagliatelle:
-    # check the struck tag name case
-    case:
-      # use the struct field name to check the name of the struct tag
-      use-field-name: true
-      rules:
-        # JSON must use snake case
-        json: snake
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/LICENSE b/vendor/github.com/PaulSonOfLars/gotgbot/v2/LICENSE
deleted file mode 100644
index 04eb304..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020 Paul Larsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/README.md b/vendor/github.com/PaulSonOfLars/gotgbot/v2/README.md
deleted file mode 100644
index 9b0b528..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# Golang Telegram Bot library
-
-Heavily inspired by the [python-telegram-bot library](https://github.com/python-telegram-bot/python-telegram-bot), this
-package is a code-generated wrapper for the telegram bot api. We also provide an extensions package which defines an
-updater/dispatcher pattern to provide update processing out of the box.
-
-All the telegram types and methods are generated from
-[a bot api spec](https://github.com/PaulSonOfLars/telegram-bot-api-spec). These are generated in the `gen_*.go` files.
-
-If you have any questions, come find us in our [telegram support chat](https://t.me/GotgbotChat)!
-
-## Features:
-
-- All telegram API types and methods are generated from the bot api docs, which makes this library:
-  - Guaranteed to match the docs
-  - Easy to update
-  - Self-documenting (Re-uses pre-existing telegram docs)
-- Type safe; no weird interface{} logic, all types match the bot API docs.
-- No third party library bloat; only uses standard library.
-- Updates are each processed in their own go routine, encouraging concurrent processing, and keeping your bot
-  responsive.
-- Code panics are automatically recovered from and logged, avoiding unexpected downtime.
-
-## Getting started
-
-Download the library with the standard `go get` command:
-
-```bash
-go get github.com/PaulSonOfLars/gotgbot/v2
-```
-
-### Example bots
-
-Sample bots can be found in the [samples directory](samples).
-
-## Docs
-
-Docs can be found [here](https://pkg.go.dev/github.com/PaulSonOfLars/gotgbot/v2).
-
-## Contributing
-
-Contributions are welcome! More information on contributing can be found [here](.github/CONTRIBUTING.md).
-
-### Regenerating the generated code.
-
-If you've made changes to the code generation, you will probably need to regenerate the library code.
-This can be done simply by running `go generate` from the repo root. Running this will generate the code from the
-specification repo at the commit pinned in the `spec_commit` file.
-
-To upgrade the commit in `spec_commit` and regenerate your code, simply run `GOTGBOT_UPGRADE=true go generate`.
-This will fetch the latest commit sha, and regenerate the library against that, giving you the latest version
-available.
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/bot.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/bot.go
deleted file mode 100644
index 0ff3af3..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/bot.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package gotgbot
-
-import (
-	"encoding/json"
-	"errors"
-	"net/http"
-	"time"
-)
-
-//go:generate go run ./scripts/generate
-
-// Bot is the default Bot struct used to send and receive messages to the telegram API.
-type Bot struct {
-	// The bot's User info, as returned by Bot.GetMe. Populated when created through the NewBot method.
-	User
-	// The bot client to use to make requests
-	BotClient
-}
-
-// BotOpts declares all optional parameters for the NewBot function.
-type BotOpts struct {
-	// HTTP client with any custom settings (eg proxy information) that might be necessary.
-	Client http.Client
-	// Disables the token validity check.
-	// Useful when running in time-constrained environments where the startup time should be minimised, and where the
-	// token can be assumed to be valid (eg lambdas).
-	// Warning: Disabling the token check will mean that the Bot.User struct will no longer be populated.
-	DisableTokenCheck bool
-	// UseTestEnvironment defines whether this bot was created to run on telegram's test environment.
-	// Enabling this uses a slightly different API path.
-	// See https://core.telegram.org/bots/webapps#using-bots-in-the-test-environment for more details.
-	UseTestEnvironment bool
-	// Request opts to use for checking token validity with Bot.GetMe. Can be slow - a high timeout (eg 10s) is
-	// recommended.
-	RequestOpts *RequestOpts
-	// Default opts to use for all requests, when no other request opts are specified.
-	DefaultRequestOpts *RequestOpts
-}
-
-// NewBot returns a new Bot struct populated with the necessary defaults.
-func NewBot(token string, opts *BotOpts) (*Bot, error) {
-	botClient := &BaseBotClient{
-		Token:              token,
-		Client:             http.Client{},
-		DefaultRequestOpts: nil,
-	}
-
-	// Large timeout on the initial GetMe request as this can sometimes be slow.
-	getMeReqOpts := &RequestOpts{
-		Timeout: 10 * time.Second,
-		APIURL:  DefaultAPIURL,
-	}
-
-	checkTokenValidity := true
-	if opts != nil {
-		botClient.Client = opts.Client
-		botClient.UseTestEnvironment = opts.UseTestEnvironment
-		if opts.DefaultRequestOpts != nil {
-			botClient.DefaultRequestOpts = opts.DefaultRequestOpts
-		}
-		if opts.RequestOpts != nil {
-			getMeReqOpts = opts.RequestOpts
-		}
-		checkTokenValidity = !opts.DisableTokenCheck
-	}
-
-	b := Bot{
-		BotClient: botClient,
-	}
-
-	if checkTokenValidity {
-		// Get bot info. This serves two purposes:
-		// 1. Check token is valid.
-		// 2. Populate the bot struct "User" field.
-		botUser, err := b.GetMe(&GetMeOpts{RequestOpts: getMeReqOpts})
-		if err != nil {
-			return nil, err
-		}
-		b.User = *botUser
-	}
-
-	return &b, nil
-}
-
-func (bot *Bot) UseMiddleware(mw func(client BotClient) BotClient) *Bot {
-	bot.BotClient = mw(bot.BotClient)
-	return bot
-}
-
-var ErrNilBotClient = errors.New("nil BotClient")
-
-func (bot *Bot) Request(method string, params map[string]string, data map[string]NamedReader, opts *RequestOpts) (json.RawMessage, error) {
-	if bot.BotClient == nil {
-		return nil, ErrNilBotClient
-	}
-
-	ctx, cancel := bot.BotClient.TimeoutContext(opts)
-	defer cancel()
-
-	return bot.BotClient.RequestWithContext(ctx, method, params, data, opts)
-}
diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/custom_helpers.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/custom_helpers.go
deleted file mode 100644
index aaec888..0000000
--- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/custom_helpers.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package gotgbot
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// GetLink is a helper method to easily get the message link (It will return an empty string in case of private or group chat type).
-func (m Message) GetLink() string {
-	if m.Chat.Type == "private" || m.Chat.Type == "group" {
-		return ""
-	}
-	if m.Chat.Username != "" {
-		return fmt.Sprintf("https://t.me/%s/%d", m.Chat.Username, m.MessageId)
-	}
-	// Message links use raw chatIds without the -100 prefix; this trims that prefix.
-	rawChatId := strings.TrimPrefix(strconv.FormatInt(m.Chat.Id, 10), "-100")
-	return fmt.Sprintf("https://t.me/c/%s/%d", rawChatId, m.MessageId)
-}
-
-// Reply is a helper function to easily call Bot.SendMessage as a reply to an existing message.
-func (m Message) Reply(b *Bot, text string, opts *SendMessageOpts) (*Message, error) {
-	if opts == nil {
-		opts = &SendMessageOpts{}
-	}
-
-	if opts.ReplyToMessageId == 0 {
-		opts.ReplyToMessageId = m.MessageId
-	}
-
-	return b.SendMessage(m.Chat.Id, text, opts)
-}
-
-// SendMessage is a helper function to easily call Bot.SendMessage in a chat.
-func (c Chat) SendMessage(b *Bot, text string, opts *SendMessageOpts) (*Message, error) {
-	return b.SendMessage(c.Id, text, opts)
-}
-
-// Unban is a helper function to easily call Bot.UnbanChatMember in a chat.
-func (c Chat) Unban(b *Bot, userId int64, opts *UnbanChatMemberOpts) (bool, error) {
-	return b.UnbanChatMember(c.Id, userId, opts)
-}
-
-// Promote is a helper function to easily call Bot.PromoteChatMember in a chat.
-func (c Chat) Promote(b *Bot, userId int64, opts *PromoteChatMemberOpts) (bool, error) { - return b.PromoteChatMember(c.Id, userId, opts) -} - -// GetURL gets the URL the file can be downloaded from. -func (f File) GetURL(b *Bot) string { - return fmt.Sprintf("%s/file/bot%s/%s", b.GetAPIURL(), b.GetToken(), f.FilePath) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/entities.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/entities.go deleted file mode 100644 index fa9c20d..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/entities.go +++ /dev/null @@ -1,66 +0,0 @@ -package gotgbot - -import "unicode/utf16" - -type ParsedMessageEntity struct { - MessageEntity - Text string `json:"text"` -} - -// ParseEntities calls Message.ParseEntity on all message text entities. -func (m Message) ParseEntities() (out []ParsedMessageEntity) { - return m.ParseEntityTypes(nil) -} - -// ParseCaptionEntities calls Message.ParseEntity on all message caption entities. -func (m Message) ParseCaptionEntities() (out []ParsedMessageEntity) { - return m.ParseCaptionEntityTypes(nil) -} - -// ParseEntityTypes calls Message.ParseEntity on a subset of message text entities. -func (m Message) ParseEntityTypes(accepted map[string]struct{}) (out []ParsedMessageEntity) { - utf16Text := utf16.Encode([]rune(m.Text)) - for _, ent := range m.Entities { - if _, ok := accepted[ent.Type]; ok || accepted == nil { - out = append(out, parseEntity(ent, utf16Text)) - } - } - return out -} - -// ParseCaptionEntityTypes calls Message.ParseEntity on a subset of message caption entities. -func (m Message) ParseCaptionEntityTypes(accepted map[string]struct{}) (out []ParsedMessageEntity) { - utf16Caption := utf16.Encode([]rune(m.Caption)) - for _, ent := range m.CaptionEntities { - if _, ok := accepted[ent.Type]; ok || accepted == nil { - out = append(out, parseEntity(ent, utf16Caption)) - } - } - return out -} - -// ParseEntity parses a single message text entity to populate text contents, URL, and offsets in UTF8. -func (m Message) ParseEntity(entity MessageEntity) ParsedMessageEntity { - return parseEntity(entity, utf16.Encode([]rune(m.Text))) -} - -// ParseCaptionEntity parses a single message caption entity to populate text contents, URL, and offsets in UTF8. -func (m Message) ParseCaptionEntity(entity MessageEntity) ParsedMessageEntity { - return parseEntity(entity, utf16.Encode([]rune(m.Caption))) -} - -func parseEntity(entity MessageEntity, utf16Text []uint16) ParsedMessageEntity { - text := string(utf16.Decode(utf16Text[entity.Offset : entity.Offset+entity.Length])) - - if entity.Type == "url" { - entity.Url = text - } - - entity.Offset = int64(len(string(utf16.Decode(utf16Text[:entity.Offset])))) - entity.Length = int64(len(text)) - - return ParsedMessageEntity{ - MessageEntity: entity, - Text: text, - } -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/context.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/context.go deleted file mode 100644 index 0914fc1..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/context.go +++ /dev/null @@ -1,152 +0,0 @@ -package ext - -import ( - "strings" - - "github.com/PaulSonOfLars/gotgbot/v2" -) - -// TODO: extend to be used as a generic cancel context? 
-type Context struct { - *gotgbot.Update - Data map[string]interface{} - - // EffectiveMessage is the message which triggered the update, if possible - EffectiveMessage *gotgbot.Message - // EffectiveChat is the chat the update was triggered in, if possible - EffectiveChat *gotgbot.Chat - // EffectiveUser is the user who triggered the update, if possible. - // Note: when adding a user, the user who ADDED should be the EffectiveUser; - // they caused the update. If a user joins naturally, then they are the EffectiveUser. - // - // WARNING: It may be better to rely on EffectiveSender instead, which allows for easier use - // in the case of linked channels, anonymous admins, or anonymous channels. - EffectiveUser *gotgbot.User - // EffectiveSender is the sender of the update. This can be either: - // - a user - // - an anonymous admin of the current chat, speaking through the chat - // - the linked channel of the current chat - // - an anonymous user, speaking through a channel - EffectiveSender *gotgbot.Sender -} - -// NewContext populates a context with the relevant fields from the current update. -// It takes a data field in the case where custom data needs to be passed. -func NewContext(update *gotgbot.Update, data map[string]interface{}) *Context { - var msg *gotgbot.Message - var chat *gotgbot.Chat - var user *gotgbot.User - var sender *gotgbot.Sender - - switch { - case update.Message != nil: - msg = update.Message - chat = &update.Message.Chat - user = update.Message.From - - case update.EditedMessage != nil: - msg = update.EditedMessage - chat = &update.EditedMessage.Chat - user = update.EditedMessage.From - - case update.ChannelPost != nil: - msg = update.ChannelPost - chat = &update.ChannelPost.Chat - - case update.EditedChannelPost != nil: - msg = update.EditedChannelPost - chat = &update.EditedChannelPost.Chat - - case update.InlineQuery != nil: - user = &update.InlineQuery.From - - case update.CallbackQuery != nil: - user = &update.CallbackQuery.From - - if update.CallbackQuery.Message != nil { - msg = update.CallbackQuery.Message - chat = &update.CallbackQuery.Message.Chat - // Note: the sender is the sender of the CallbackQuery; not the sender of the CallbackQuery.Message. - sender = &gotgbot.Sender{User: user, ChatId: chat.Id} - } - - case update.ChosenInlineResult != nil: - user = &update.ChosenInlineResult.From - - case update.ShippingQuery != nil: - user = &update.ShippingQuery.From - - case update.PreCheckoutQuery != nil: - user = &update.PreCheckoutQuery.From - - case update.MyChatMember != nil: - user = &update.MyChatMember.From - chat = &update.MyChatMember.Chat - - case update.ChatMember != nil: - user = &update.ChatMember.From - chat = &update.ChatMember.Chat - - case update.ChatJoinRequest != nil: - user = &update.ChatJoinRequest.From - chat = &update.ChatJoinRequest.Chat - } - - if data == nil { - data = make(map[string]interface{}) - } - - if sender == nil { - if msg != nil { - sender = msg.GetSender() - } else if user != nil { - sender = &gotgbot.Sender{User: user} - if chat != nil { - sender.ChatId = chat.Id - } - } - } - - return &Context{ - Update: update, - Data: data, - EffectiveMessage: msg, - EffectiveChat: chat, - EffectiveUser: user, - EffectiveSender: sender, - } -} - -// Args gets the list of whitespace-separated arguments of the message text. 
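Before the Args implementation that follows, a sketch of how a handler might combine the populated Context fields with Args. The `greet` response function is a hypothetical example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/PaulSonOfLars/gotgbot/v2"
	"github.com/PaulSonOfLars/gotgbot/v2/ext"
)

// greet is an illustrative command response. Args()[0] is the command itself
// (eg "/greet"), so the real arguments start at index 1.
func greet(b *gotgbot.Bot, ctx *ext.Context) error {
	name := "there"
	if args := ctx.Args(); len(args) > 1 {
		name = strings.Join(args[1:], " ")
	}
	_, err := ctx.EffectiveMessage.Reply(b, fmt.Sprintf("Hello, %s!", name), nil)
	return err
}
```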
-func (c *Context) Args() []string { - var msg *gotgbot.Message - - switch { - case c.Update.Message != nil: - msg = c.Update.Message - - case c.Update.EditedMessage != nil: - msg = c.Update.EditedMessage - - case c.Update.ChannelPost != nil: - msg = c.Update.ChannelPost - - case c.Update.EditedChannelPost != nil: - msg = c.Update.EditedChannelPost - - case c.Update.CallbackQuery != nil && c.Update.CallbackQuery.Message != nil: - msg = c.Update.CallbackQuery.Message - } - - if msg == nil { - return nil - } - - if msg.Text != "" { - return strings.Fields(msg.Text) - } else if msg.Caption != "" { - return strings.Fields(msg.Caption) - } - - return nil -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/dispatcher.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/dispatcher.go deleted file mode 100644 index 2c5907d..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/dispatcher.go +++ /dev/null @@ -1,263 +0,0 @@ -package ext - -import ( - "encoding/json" - "errors" - "log" - "runtime/debug" - "sort" - "sync" - - "github.com/PaulSonOfLars/gotgbot/v2" -) - -const DefaultMaxRoutines = 50 - -type ( - // DispatcherErrorHandler allows for handling the returned errors from matched handlers. - // It takes the non-nil error returned by the handler. - DispatcherErrorHandler func(b *gotgbot.Bot, ctx *Context, err error) DispatcherAction - // DispatcherPanicHandler allows for handling goroutine panics, where the 'r' value contains the reason for the panic. - DispatcherPanicHandler func(b *gotgbot.Bot, ctx *Context, r interface{}) -) - -type DispatcherAction int64 - -const ( - // DispatcherActionNoop stops iteration of current group and moves to the next one. - // This is the default action, and the same as would happen if the handler had completed successfully. - DispatcherActionNoop DispatcherAction = iota - // DispatcherActionContinueGroups continues iterating over current group as if the current handler did not match. - // Functionally the same as returning ContinueGroups. - DispatcherActionContinueGroups - // DispatcherActionEndGroups ends all group iteration. - // Functionally the same as returning EndGroups. - DispatcherActionEndGroups -) - -var ( - EndGroups = errors.New("group iteration ended") - ContinueGroups = errors.New("group iteration continued") -) - -type Dispatcher struct { - // Error handles any errors that occur during handler execution. The return type determines how to handle the - // current group iteration. Default is DispatcherActionNoop; move to next group. - Error DispatcherErrorHandler - // Panic handles any panics that occur during handler execution. - // If this field is nil, the stack is logged to ErrorLog. - Panic DispatcherPanicHandler - // ErrorLog is the output to log to when handling a library error, or recovering from a panic from user code. - ErrorLog *log.Logger - - // handlerGroups represents the list of available handler groups, numerically sorted. - handlerGroups []int - // handlers represents all available handlers, split into groups (see handlerGroups). - handlers map[int][]Handler - - // updatesChan is the channel that the dispatcher receives all new updates on. - updatesChan chan json.RawMessage - // limiter is how we limit the maximum number of goroutines for handling updates. - // if nil, this is a limitless dispatcher. - limiter chan struct{} - // waitGroup handles the number of running operations to allow for clean shutdowns.
- waitGroup sync.WaitGroup -} - -type DispatcherOpts struct { - // Error handles any errors that occur during handler execution. - Error DispatcherErrorHandler - // Panic handles any panics that occur during handler execution. - // If no panic handlers are defined, the stack is logged to ErrorLog. - Panic DispatcherPanicHandler - // ErrorLog is the output to log to when handling a library error, or recovering from a panic from user code. - ErrorLog *log.Logger - - // MaxRoutines is used to decide how to limit the number of goroutines spawned by the dispatcher. - // This defines how many updates can be processed at the same time. - // If MaxRoutines == 0, DefaultMaxRoutines is used instead. - // If MaxRoutines < 0, no limits are imposed. - // If MaxRoutines > 0, that value is used. - MaxRoutines int -} - -// NewDispatcher creates a new dispatcher, which processes and handles incoming updates from the updates channel. -func NewDispatcher(updates chan json.RawMessage, opts *DispatcherOpts) *Dispatcher { - var errFunc DispatcherErrorHandler - var panicFunc DispatcherPanicHandler - - maxRoutines := DefaultMaxRoutines - errLog := errorLog - - if opts != nil { - if opts.MaxRoutines != 0 { - maxRoutines = opts.MaxRoutines - } - - if opts.ErrorLog != nil { - errLog = opts.ErrorLog - } - - errFunc = opts.Error - panicFunc = opts.Panic - } - - var limiter chan struct{} - // if maxRoutines < 0, we use a limitless dispatcher. (limiter == nil) - if maxRoutines >= 0 { - if maxRoutines == 0 { - maxRoutines = DefaultMaxRoutines - } - - limiter = make(chan struct{}, maxRoutines) - } - - return &Dispatcher{ - Error: errFunc, - Panic: panicFunc, - ErrorLog: errLog, - updatesChan: updates, - handlers: make(map[int][]Handler), - limiter: limiter, - waitGroup: sync.WaitGroup{}, - } -} - -// CurrentUsage returns the current number of concurrently processing updates. -func (d *Dispatcher) CurrentUsage() int { - return len(d.limiter) -} - -// MaxUsage returns the maximum number of concurrently processing updates. -func (d *Dispatcher) MaxUsage() int { - return cap(d.limiter) -} - -// Start begins handling incoming updates. -func (d *Dispatcher) Start(b *gotgbot.Bot) { - if d.limiter == nil { - d.limitlessDispatcher(b) - return - } - - d.limitedDispatcher(b) -} - -// Stop waits for all currently processing updates to finish, and then returns. -func (d *Dispatcher) Stop() { - d.waitGroup.Wait() -} - -func (d *Dispatcher) limitedDispatcher(b *gotgbot.Bot) { - for upd := range d.updatesChan { - d.waitGroup.Add(1) - - // Send empty data to limiter. - // if limiter buffer is full, it blocks until another update finishes processing. - d.limiter <- struct{}{} - go func(upd json.RawMessage) { - d.ProcessRawUpdate(b, upd) - - <-d.limiter - d.waitGroup.Done() - }(upd) - } -} - -func (d *Dispatcher) limitlessDispatcher(b *gotgbot.Bot) { - for upd := range d.updatesChan { - d.waitGroup.Add(1) - - go func(upd json.RawMessage) { - d.ProcessRawUpdate(b, upd) - d.waitGroup.Done() - }(upd) - } -} - -// AddHandler adds a new handler to the dispatcher. The dispatcher will call CheckUpdate() to see whether the handler -// should be executed, and then HandleUpdate() to execute it. -func (d *Dispatcher) AddHandler(handler Handler) { - d.AddHandlerToGroup(handler, 0) -} - -// AddHandlerToGroup adds a handler to a specific group; lowest number will be processed first.
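Before the implementation below, an illustrative sketch of how handler groups are typically used. The `registerHandlers` function is hypothetical; it relies only on the AddHandler/AddHandlerToGroup semantics above and on the NewMessage/NewCommand constructors from the handlers package, which appear later in this patch:

```go
package main

import (
	"log"

	"github.com/PaulSonOfLars/gotgbot/v2"
	"github.com/PaulSonOfLars/gotgbot/v2/ext"
	"github.com/PaulSonOfLars/gotgbot/v2/ext/handlers"
)

func registerHandlers(dispatcher *ext.Dispatcher) {
	// Group -1 runs before the default group 0. Returning nil ends this group
	// and moves on to the next; ext.ContinueGroups would keep trying handlers
	// within the same group, and ext.EndGroups would stop all handling.
	dispatcher.AddHandlerToGroup(handlers.NewMessage(nil, func(b *gotgbot.Bot, ctx *ext.Context) error {
		log.Println("saw a message in chat", ctx.EffectiveChat.Id)
		return nil
	}), -1)

	// Group 0: the actual command handler.
	dispatcher.AddHandler(handlers.NewCommand("start", func(b *gotgbot.Bot, ctx *ext.Context) error {
		_, err := ctx.EffectiveMessage.Reply(b, "Hi!", nil)
		return err
	}))
}
```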
-func (d *Dispatcher) AddHandlerToGroup(handler Handler, group int) { - currHandlers, ok := d.handlers[group] - if !ok { - d.handlerGroups = append(d.handlerGroups, group) - sort.Ints(d.handlerGroups) - } - d.handlers[group] = append(currHandlers, handler) -} - -func (d *Dispatcher) ProcessRawUpdate(b *gotgbot.Bot, r json.RawMessage) { - var upd gotgbot.Update - if err := json.Unmarshal(r, &upd); err != nil { - d.ErrorLog.Println("failed to process raw update: " + err.Error()) - return - } - - d.ProcessUpdate(b, &upd, nil) -} - -// ProcessUpdate iterates over the list of groups to execute the matching handlers. -func (d *Dispatcher) ProcessUpdate(b *gotgbot.Bot, update *gotgbot.Update, data map[string]interface{}) { - var ctx *Context - - defer func() { - if r := recover(); r != nil { - if d.Panic != nil { - d.Panic(b, ctx, r) - return - } - // Print reason for panic + stack for some sort of helpful log output - d.ErrorLog.Println(r) - d.ErrorLog.Println(string(debug.Stack())) - } - }() - - for _, groupNum := range d.handlerGroups { - for _, handler := range d.handlers[groupNum] { - if !handler.CheckUpdate(b, update) { - continue - } - - if ctx == nil { - ctx = NewContext(update, data) - } - - err := handler.HandleUpdate(b, ctx) - if err != nil { - if errors.Is(err, ContinueGroups) { - // Continue handling current group. - continue - } else if errors.Is(err, EndGroups) { - // Stop all group handling. - return - } else { - action := DispatcherActionNoop - if d.Error != nil { - action = d.Error(b, ctx, err) - } - - switch action { - case DispatcherActionNoop: - // Move on to next group; same action as if group had been successful. - case DispatcherActionContinueGroups: - // Continue handling current group. - continue - case DispatcherActionEndGroups: - // Stop all group handling. - return - default: - d.ErrorLog.Printf("unknown action %d, ending groups here", action) - return - } - } - } - - break // move to next group - } - } -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handler.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handler.go deleted file mode 100644 index e9fd09c..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handler.go +++ /dev/null @@ -1,12 +0,0 @@ -package ext - -import "github.com/PaulSonOfLars/gotgbot/v2" - -type Handler interface { - // CheckUpdate checks whether the update should be handled by this handler. - CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool - // HandleUpdate processes the update. - HandleUpdate(b *gotgbot.Bot, ctx *Context) error - // Name gets the handler name; used to differentiate handlers programmatically. Names should be unique.
- Name() string -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/callbackquery.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/callbackquery.go deleted file mode 100644 index edd001f..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/callbackquery.go +++ /dev/null @@ -1,42 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type CallbackQuery struct { - AllowChannel bool - Filter filters.CallbackQuery - Response Response -} - -func NewCallback(filter filters.CallbackQuery, r Response) CallbackQuery { - return CallbackQuery{ - Filter: filter, - Response: r, - } -} - -func (cb CallbackQuery) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return cb.Response(b, ctx) -} - -func (cb CallbackQuery) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.CallbackQuery == nil { - return false - } - - if !cb.AllowChannel && u.CallbackQuery.Message != nil && u.CallbackQuery.Message.Chat.Type == "channel" { - return false - } - - return cb.Filter == nil || cb.Filter(u.CallbackQuery) -} - -func (cb CallbackQuery) Name() string { - return fmt.Sprintf("callbackquery_%p", cb.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatjoinrequest.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatjoinrequest.go deleted file mode 100644 index f89dabc..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatjoinrequest.go +++ /dev/null @@ -1,36 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type ChatJoinRequest struct { - Filter filters.ChatJoinRequest - Response Response -} - -func NewChatJoinRequest(f filters.ChatJoinRequest, r Response) ChatJoinRequest { - return ChatJoinRequest{ - Filter: f, - Response: r, - } -} - -func (r ChatJoinRequest) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.ChatJoinRequest == nil { - return false - } - return r.Filter == nil || r.Filter(u.ChatJoinRequest) -} - -func (r ChatJoinRequest) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return r.Response(b, ctx) -} - -func (r ChatJoinRequest) Name() string { - return fmt.Sprintf("chatjoinrequest_%p", r.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatmember.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatmember.go deleted file mode 100644 index 2860450..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/chatmember.go +++ /dev/null @@ -1,37 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type ChatMember struct { - Response Response - Filter filters.ChatMember -} - -func NewChatMember(f filters.ChatMember, r Response) ChatMember { - return ChatMember{ - Response: r, - Filter: f, - } -} - -func (c ChatMember) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.ChatMember == nil { - return false - } - - return c.Filter == nil || c.Filter(u.ChatMember) -} - -func (c ChatMember) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return c.Response(b, ctx) -} - -func (c ChatMember) Name() string { - return fmt.Sprintf("chatmember_%p", c.Response) -} diff --git
a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/choseninlineresult.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/choseninlineresult.go deleted file mode 100644 index 0a49a5a..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/choseninlineresult.go +++ /dev/null @@ -1,37 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type ChosenInlineResult struct { - Filter filters.ChosenInlineResult - Response Response -} - -func NewChosenInlineResult(filter filters.ChosenInlineResult, r Response) ChosenInlineResult { - return ChosenInlineResult{ - Filter: filter, - Response: r, - } -} - -func (i ChosenInlineResult) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return i.Response(b, ctx) -} - -func (i ChosenInlineResult) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.ChosenInlineResult == nil { - return false - } - - return i.Filter == nil || i.Filter(u.ChosenInlineResult) -} - -func (i ChosenInlineResult) Name() string { - return fmt.Sprintf("choseninlineresult_%p", i.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/command.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/command.go deleted file mode 100644 index 190a584..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/command.go +++ /dev/null @@ -1,98 +0,0 @@ -package handlers - -import ( - "strings" - "unicode/utf8" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" -) - -type Command struct { - Triggers []rune - AllowEdited bool - AllowChannel bool - Command string // should be lowercase for case-insensitivity - Response Response -} - -func NewCommand(c string, r Response) Command { - return Command{ - Triggers: []rune{'/'}, - AllowEdited: false, - AllowChannel: false, - Command: strings.ToLower(c), - Response: r, - } -} - -func (c Command) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.Message != nil { - if u.Message.Text == "" && u.Message.Caption == "" { - return false - } - return c.checkMessage(b, u.Message) - } - - // if edited messages are allowed, and the message is an edit - if c.AllowEdited && u.EditedMessage != nil { - if u.EditedMessage.Text == "" && u.EditedMessage.Caption == "" { - return false - } - return c.checkMessage(b, u.EditedMessage) - } - // if channel posts are allowed, and the message is a channel post - if c.AllowChannel && u.ChannelPost != nil { - if u.ChannelPost.Text == "" && u.ChannelPost.Caption == "" { - return false - } - return c.checkMessage(b, u.ChannelPost) - } - // if channel posts and edits are allowed, and the message is an edited channel post - if c.AllowChannel && c.AllowEdited && u.EditedChannelPost != nil { - if u.EditedChannelPost.Text == "" && u.EditedChannelPost.Caption == "" { - return false - } - return c.checkMessage(b, u.EditedChannelPost) - } - - return false -} - -func (c Command) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return c.Response(b, ctx) -} - -func (c Command) Name() string { - return "command_" + c.Command -} - -func (c Command) checkMessage(b *gotgbot.Bot, msg *gotgbot.Message) bool { - text := msg.Text - if msg.Caption != "" { - text = msg.Caption - } - - var cmd string - for _, t := range c.Triggers { - if r, _ := utf8.DecodeRuneInString(text); r != t { - continue - } - - split := strings.Split(strings.ToLower(strings.Fields(text)[0]), "@") - if len(split) > 1 && split[1] != strings.ToLower(b.User.Username) { - return
false - } - cmd = split[0][1:] - break - } - if cmd == "" { - return false - } - - if len(msg.Entities) != 0 && msg.Entities[0].Offset == 0 && msg.Entities[0].Type != "bot_command" { - return false - } - - return cmd == c.Command -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/common.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/common.go deleted file mode 100644 index fff528b..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/common.go +++ /dev/null @@ -1,8 +0,0 @@ -package handlers - -import ( - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" -) - -type Response func(b *gotgbot.Bot, ctx *ext.Context) error diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters/types.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters/types.go deleted file mode 100644 index 6e35744..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters/types.go +++ /dev/null @@ -1,14 +0,0 @@ -package filters - -import "github.com/PaulSonOfLars/gotgbot/v2" - -type ( - CallbackQuery func(cq *gotgbot.CallbackQuery) bool - ChatMember func(u *gotgbot.ChatMemberUpdated) bool - ChosenInlineResult func(cir *gotgbot.ChosenInlineResult) bool - InlineQuery func(iq *gotgbot.InlineQuery) bool - Message func(msg *gotgbot.Message) bool - Poll func(poll *gotgbot.Poll) bool - PollAnswer func(pa *gotgbot.PollAnswer) bool - ChatJoinRequest func(cjr *gotgbot.ChatJoinRequest) bool -) diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/inlinequery.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/inlinequery.go deleted file mode 100644 index 0c32f29..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/inlinequery.go +++ /dev/null @@ -1,37 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type InlineQuery struct { - Filter filters.InlineQuery - Response Response -} - -func NewInlineQuery(f filters.InlineQuery, r Response) InlineQuery { - return InlineQuery{ - Filter: f, - Response: r, - } -} - -func (i InlineQuery) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return i.Response(b, ctx) -} - -func (i InlineQuery) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.InlineQuery == nil { - return false - } - - return i.Filter == nil || i.Filter(u.InlineQuery) -} - -func (i InlineQuery) Name() string { - return fmt.Sprintf("inlinequery_%p", i.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/message.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/message.go deleted file mode 100644 index 2267d78..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/message.go +++ /dev/null @@ -1,57 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type Message struct { - AllowEdited bool - AllowChannel bool - Filter filters.Message - Response Response -} - -func NewMessage(f filters.Message, r Response) Message { - return Message{ - AllowEdited: false, - AllowChannel: false, - Filter: f, - Response: r, - } -} - -func (m Message) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - // Normal incoming message in a group/private chat. 
- if u.Message != nil { - return m.Filter == nil || m.Filter(u.Message) - } - - // If edits are allowed, and message is edited. - if m.AllowEdited && u.EditedMessage != nil { - return m.Filter == nil || m.Filter(u.EditedMessage) - } - - // If channels are allowed, and message is a channel post. - if m.AllowChannel && u.ChannelPost != nil { - return m.Filter == nil || m.Filter(u.ChannelPost) - } - - // If edits AND channels are allowed, and message is a channel post. - if m.AllowChannel && m.AllowEdited && u.EditedChannelPost != nil { - return m.Filter == nil || m.Filter(u.EditedChannelPost) - } - - return false -} - -func (m Message) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return m.Response(b, ctx) -} - -func (m Message) Name() string { - return fmt.Sprintf("message_%p", m.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/mychatmember.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/mychatmember.go deleted file mode 100644 index 251ce64..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/mychatmember.go +++ /dev/null @@ -1,36 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type MyChatMember struct { - Response Response - Filter filters.ChatMember -} - -func NewMyChatMember(f filters.ChatMember, r Response) MyChatMember { - return MyChatMember{ - Response: r, - Filter: f, - } -} - -func (m MyChatMember) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.MyChatMember == nil { - return false - } - return m.Filter == nil || m.Filter(u.MyChatMember) -} - -func (m MyChatMember) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return m.Response(b, ctx) -} - -func (m MyChatMember) Name() string { - return fmt.Sprintf("mychatmember_%p", m.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/poll.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/poll.go deleted file mode 100644 index 137738d..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/poll.go +++ /dev/null @@ -1,36 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type Poll struct { - Filter filters.Poll - Response Response -} - -func NewPoll(f filters.Poll, r Response) Poll { - return Poll{ - Filter: f, - Response: r, - } -} - -func (r Poll) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.Poll == nil { - return false - } - return r.Filter == nil || r.Filter(u.Poll) -} - -func (r Poll) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return r.Response(b, ctx) -} - -func (r Poll) Name() string { - return fmt.Sprintf("poll_%p", r.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/pollanswer.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/pollanswer.go deleted file mode 100644 index ae4762d..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/pollanswer.go +++ /dev/null @@ -1,36 +0,0 @@ -package handlers - -import ( - "fmt" - - "github.com/PaulSonOfLars/gotgbot/v2" - "github.com/PaulSonOfLars/gotgbot/v2/ext" - "github.com/PaulSonOfLars/gotgbot/v2/ext/handlers/filters" -) - -type PollAnswer struct { - Filter filters.PollAnswer - Response Response -} - -func NewPollAnswer(f filters.PollAnswer, r Response) PollAnswer 
{ - return PollAnswer{ - Filter: f, - Response: r, - } -} - -func (r PollAnswer) CheckUpdate(b *gotgbot.Bot, u *gotgbot.Update) bool { - if u.PollAnswer == nil { - return false - } - return r.Filter == nil || r.Filter(u.PollAnswer) -} - -func (r PollAnswer) HandleUpdate(b *gotgbot.Bot, ctx *ext.Context) error { - return r.Response(b, ctx) -} - -func (r PollAnswer) Name() string { - return fmt.Sprintf("pollanswer_%p", r.Response) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/queryvalidation.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/queryvalidation.go deleted file mode 100644 index 9e6cb4f..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/queryvalidation.go +++ /dev/null @@ -1,110 +0,0 @@ -package ext - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "net/url" - "sort" - "strings" -) - -// ValidateLoginQuery validates a login widget query. -// See https://core.telegram.org/widgets/login#checking-authorization for more details. -func ValidateLoginQuery(query url.Values, token string) (bool, error) { - tokenHash, err := getSHA256(token) - if err != nil { - return false, fmt.Errorf("failed to hash token: %w", err) - } - - return validateQuery(query, tokenHash) -} - -// ValidateWebAppInitData validates a webapp's initData field for safe use on the server-side. -// The initData field is stored as a query string, so this is converted and then validated. -// See https://core.telegram.org/bots/webapps#validating-data-received-via-the-web-app for more details. -func ValidateWebAppInitData(initData string, token string) (bool, error) { - query, err := url.ParseQuery(initData) - if err != nil { - return false, fmt.Errorf("failed to parse URL query: %w", err) - } - - return ValidateWebAppQuery(query, token) -} - -// ValidateWebAppQuery validates a webapp's initData query for safe use on the server side. -// The input is expected to be the parsed initData query string. -// See https://core.telegram.org/bots/webapps#validating-data-received-via-the-web-app for more details. -func ValidateWebAppQuery(query url.Values, token string) (bool, error) { - tokenHMAC, err := generateHMAC256(token, []byte("WebAppData")) - if err != nil { - return false, fmt.Errorf("failed to generate token HMAC: %w", err) - } - - return validateQuery(query, tokenHMAC) -} - -func validateQuery(query url.Values, secretKey []byte) (bool, error) { - // If no hash, we can't check; fail-fast. - hash := query.Get("hash") - if hash == "" { - // Should this be an error? - return false, nil - } - - // Make list of args for ordered sorting. - // len()-1, because we ignore the hash key. - args := make([]string, 0, len(query)-1) - for x, y := range query { - if x == "hash" { - // ignore the hash - continue - } - args = append(args, x+"="+y[0]) - } - - // Sort args to ensure consistency. - sort.Strings(args) - - // Join data with newline, as defined by telegram. - dataCheck := strings.Join(args, "\n") - - // Generate HMAC of expected data. - expectedHMAC, err := generateHMAC256(dataCheck, secretKey) - if err != nil { - return false, fmt.Errorf("failed to generate data HMAC: %w", err) - } - - // Hex encode expected hmac_256 value. - expectedHex := getHex(expectedHMAC) - - // Check hash matches as expected. 
- return hmac.Equal(expectedHex, []byte(hash)), nil -} - -func generateHMAC256(data string, secretKey []byte) ([]byte, error) { - hmac256Writer := hmac.New(sha256.New, secretKey) - _, err := hmac256Writer.Write([]byte(data)) - if err != nil { - return nil, err - } - - return hmac256Writer.Sum(nil), nil -} - -func getSHA256(data string) ([]byte, error) { - sha256Writer := sha256.New() - _, err := sha256Writer.Write([]byte(data)) - if err != nil { - return nil, err - } - - return sha256Writer.Sum(nil), nil -} - -func getHex(expectedHMAC []byte) []byte { - expectedHex := make([]byte, hex.EncodedLen(len(expectedHMAC))) - hex.Encode(expectedHex, expectedHMAC) - return expectedHex -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/updater.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/updater.go deleted file mode 100644 index 7a499bd..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/ext/updater.go +++ /dev/null @@ -1,313 +0,0 @@ -package ext - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "strconv" - "strings" - "time" - - "github.com/PaulSonOfLars/gotgbot/v2" -) - -var ErrMissingCertOrKeyFile = errors.New("missing certfile or keyfile") - -type Updater struct { - Dispatcher *Dispatcher - UpdateChan chan json.RawMessage - ErrorLog *log.Logger - - stopIdling chan bool - running chan bool - server *http.Server -} - -var errorLog = log.New(os.Stderr, "ERROR", log.LstdFlags) - -type UpdaterOpts struct { - ErrorLog *log.Logger - - DispatcherOpts DispatcherOpts -} - -// NewUpdater creates a new Updater, as well as the necessary structures required for the associated Dispatcher. -func NewUpdater(opts *UpdaterOpts) Updater { - errLog := errorLog - var dispatcherOpts DispatcherOpts - - if opts != nil { - if opts.ErrorLog != nil { - errLog = opts.ErrorLog - } - - dispatcherOpts = opts.DispatcherOpts - } - - updateChan := make(chan json.RawMessage) - return Updater{ - ErrorLog: errLog, - Dispatcher: NewDispatcher(updateChan, &dispatcherOpts), - UpdateChan: updateChan, - } -} - -// PollingOpts represents the optional values to start long polling. -type PollingOpts struct { - // DropPendingUpdates decides whether or not to drop "pending" updates; these are updates which were sent before - // the bot was started. - DropPendingUpdates bool - // GetUpdatesOpts represents the opts passed to GetUpdates. - // Note: It is recommended you edit the values here when running in production environments. - // Changes might include: - // - Changing the "GetUpdatesOpts.AllowedUpdates" to only refer to relevant updates - // - Using a non-0 "GetUpdatesOpts.Timeout" value. This is how "long" telegram will hold the long-polling call - // while waiting for new messages. A value of 0 causes telegram to reply immediately, which will then cause - // your bot to immediately ask for more updates. While this can seem fine, it will eventually cause - // telegram to delay your requests when left running over longer periods. If you are seeing lots - // of "context deadline exceeded" errors on GetUpdates, this is likely the cause. - // Keep in mind that a timeout of 10 does not mean you only get updates every 10s; by the nature of - // long-polling, Telegram responds to your request as soon as new messages are available. - // When setting this, it is recommended you set your GetUpdatesOpts.RequestOpts.Timeout value to be slightly bigger (eg, +1).
- GetUpdatesOpts gotgbot.GetUpdatesOpts -} - -// StartPolling starts polling updates from telegram using the getUpdates long-polling method. -// See the PollingOpts for optional values to set in production environments. -func (u *Updater) StartPolling(b *gotgbot.Bot, opts *PollingOpts) error { - // TODO: De-duplicate this code. - // This logic is currently mostly duplicated over from the generated getUpdates code. - // This is a performance improvement to avoid: - // - needing to re-allocate new url.values structs. - // - needing to convert the opt values to strings to pass to the values. - // - unnecessary unmarshalling of the (possibly multiple) full Update structs. - // Yes, this also makes me sad. :/ - v := map[string]string{} - dropPendingUpdates := false - var reqOpts *gotgbot.RequestOpts - - if opts != nil { - dropPendingUpdates = opts.DropPendingUpdates - if opts.GetUpdatesOpts.RequestOpts != nil { - reqOpts = opts.GetUpdatesOpts.RequestOpts - } - - v["offset"] = strconv.FormatInt(opts.GetUpdatesOpts.Offset, 10) - v["limit"] = strconv.FormatInt(opts.GetUpdatesOpts.Limit, 10) - v["timeout"] = strconv.FormatInt(opts.GetUpdatesOpts.Timeout, 10) - if opts.GetUpdatesOpts.AllowedUpdates != nil { - bs, err := json.Marshal(opts.GetUpdatesOpts.AllowedUpdates) - if err != nil { - return fmt.Errorf("failed to marshal field allowed_updates: %w", err) - } - v["allowed_updates"] = string(bs) - } - } - - go u.Dispatcher.Start(b) - go u.pollingLoop(b, reqOpts, dropPendingUpdates, v) - - return nil -} - -func (u *Updater) pollingLoop(b *gotgbot.Bot, opts *gotgbot.RequestOpts, dropPendingUpdates bool, v map[string]string) { - - // if dropPendingUpdates, force the offset to -1 - if dropPendingUpdates { - v["offset"] = "-1" - } - - var offset int64 - - u.running = make(chan bool) - for { - select { - case <-u.running: - // if anything comes in, stop. - return - default: - // continue as usual - } - - r, err := b.Request("getUpdates", v, nil, opts) - if err != nil { - u.ErrorLog.Println("failed to get updates; sleeping 1s: " + err.Error()) - time.Sleep(time.Second) - continue - - } else if r == nil { - dropPendingUpdates = false - continue - } - - var rawUpdates []json.RawMessage - if err := json.Unmarshal(r, &rawUpdates); err != nil { - u.ErrorLog.Println("failed to unmarshal updates: " + err.Error()) - continue - } - - if len(rawUpdates) == 0 { - dropPendingUpdates = false - continue - } - - var lastUpdate struct { - UpdateId int64 `json:"update_id"` - } - - if err := json.Unmarshal(rawUpdates[len(rawUpdates)-1], &lastUpdate); err != nil { - u.ErrorLog.Println("failed to unmarshal last update: " + err.Error()) - continue - } - - offset = lastUpdate.UpdateId + 1 - v["offset"] = strconv.FormatInt(offset, 10) - if dropPendingUpdates { - // Setting the offset to -1 gets just the last update; this should be skipped too. - dropPendingUpdates = false - continue - } - - for _, updData := range rawUpdates { - temp := updData // use new mem address to avoid loop conflicts - u.UpdateChan <- temp - } - } -} - -// Idle starts an infinite loop to avoid the program exiting while the background threads handle updates. -func (u *Updater) Idle() { - u.stopIdling = make(chan bool) - - for { - select { - case <-u.stopIdling: - return - default: - // continue as usual - } - time.Sleep(1 * time.Second) - } -} - -// Stop stops the current updater and dispatcher instances.
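Tying the updater pieces above together, here is a minimal polling setup that follows the GetUpdatesOpts timeout advice from the PollingOpts docs. The `BOT_TOKEN` environment variable is an illustrative assumption:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/PaulSonOfLars/gotgbot/v2"
	"github.com/PaulSonOfLars/gotgbot/v2/ext"
)

func main() {
	b, err := gotgbot.NewBot(os.Getenv("BOT_TOKEN"), nil)
	if err != nil {
		log.Fatalln(err)
	}

	updater := ext.NewUpdater(nil)
	// Handlers would be registered on updater.Dispatcher here.

	err = updater.StartPolling(b, &ext.PollingOpts{
		DropPendingUpdates: true,
		GetUpdatesOpts: gotgbot.GetUpdatesOpts{
			Timeout: 9, // long-poll for 9s server-side...
			RequestOpts: &gotgbot.RequestOpts{
				Timeout: 10 * time.Second, // ...with a slightly larger HTTP timeout
			},
		},
	})
	if err != nil {
		log.Fatalln("failed to start polling: " + err.Error())
	}

	log.Println("bot started; idling")
	updater.Idle()
}
```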
-func (u *Updater) Stop() error { - // if server, this is running on webhooks; shutdown the server - if u.server != nil { - err := u.server.Shutdown(context.Background()) - if err != nil { - return fmt.Errorf("failed to shutdown server: %w", err) - } - } - - if u.running != nil { - // stop the polling loop - u.running <- false - close(u.running) - } - - close(u.UpdateChan) - - u.Dispatcher.Stop() - - if u.stopIdling != nil { - // stop idling - u.stopIdling <- false - close(u.stopIdling) - } - return nil -} - -// StartWebhook starts the webhook server. The opts parameter allows for specifying TLS settings. -func (u *Updater) StartWebhook(b *gotgbot.Bot, opts WebhookOpts) error { - var tls bool - if opts.CertFile == "" && opts.KeyFile == "" { - tls = false - } else if opts.CertFile != "" && opts.KeyFile != "" { - tls = true - } else { - return ErrMissingCertOrKeyFile - } - - go u.Dispatcher.Start(b) - - mux := http.NewServeMux() - mux.HandleFunc("/"+opts.URLPath, func(w http.ResponseWriter, r *http.Request) { - if opts.SecretToken != "" && opts.SecretToken != r.Header.Get("X-Telegram-Bot-Api-Secret-Token") { - // Drop any updates from invalid secret tokens. - w.WriteHeader(http.StatusUnauthorized) - return - } - bytes, _ := ioutil.ReadAll(r.Body) - u.UpdateChan <- bytes - }) - - u.server = &http.Server{ - Addr: opts.GetListenAddr(), - Handler: mux, - ReadTimeout: opts.ReadTimeout, - ReadHeaderTimeout: opts.ReadHeaderTimeout, - } - - go func() { - var err error - if tls { - err = u.server.ListenAndServeTLS(opts.CertFile, opts.KeyFile) - } else { - err = u.server.ListenAndServe() - } - if err != nil && !errors.Is(err, http.ErrServerClosed) { - panic("http server failed: " + err.Error()) - } - }() - - return nil -} - -// WebhookOpts represents various fields that are needed for configuring the local webhook server. -type WebhookOpts struct { - // Listen is the address to listen on (eg: localhost, 0.0.0.0, etc). - Listen string - // Port is the port to listen on (eg 443, 8443, etc). - Port int - // URLPath defines the path to listen at; eg /. - // Using the bot token here is often a good idea, as it is a secret known only by telegram. - URLPath string - // ReadTimeout is passed to the http server to limit the time it takes to read an incoming request. - // See http.Server for more details. - ReadTimeout time.Duration - // ReadHeaderTimeout is passed to the http server to limit the time it takes to read the headers of an incoming - // request. - // See http.Server for more details. - ReadHeaderTimeout time.Duration - - // HTTPS cert and key files for custom signed certificates - CertFile string - KeyFile string - - // The secret token used in the Bot.SetWebhook call, which can be used to ensure that the request comes from a - // webhook set by you. - SecretToken string -} - -// GetListenAddr returns the local listening address, including port. -func (w *WebhookOpts) GetListenAddr() string { - if w.Listen == "" { - w.Listen = "0.0.0.0" - } - if w.Port == 0 { - w.Port = 443 - } - return fmt.Sprintf("%s:%d", w.Listen, w.Port) -} - -// GetWebhookURL returns the webhook URL in the form domain/path.
-// eg: example.com/super_secret_token -func (w *WebhookOpts) GetWebhookURL(domain string) string { - return fmt.Sprintf("%s/%s", strings.TrimSuffix(domain, "/"), w.URLPath) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/formatting.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/formatting.go deleted file mode 100644 index 667d557..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/formatting.go +++ /dev/null @@ -1,291 +0,0 @@ -package gotgbot - -import ( - "html" - "strconv" - "strings" - "unicode" - "unicode/utf16" -) - -var mdMap = map[string]string{ - "bold": "*", - "italic": "_", - "code": "`", -} - -var mdV2Map = map[string]string{ - "bold": "*", - "italic": "_", - "code": "`", - "pre": "```", - "underline": "__", - "strikethrough": "~", - "spoiler": "||", -} - -var htmlMap = map[string]string{ - "bold": "b", - "italic": "i", - "code": "code", - "pre": "pre", - "underline": "u", - "strikethrough": "s", - "spoiler": "span class=\"tg-spoiler\"", -} - -// OriginalMD gets the original markdown formatting of a message text. -func (m Message) OriginalMD() string { - return getOrigMsgMD(utf16.Encode([]rune(m.Text)), m.Entities) -} - -// OriginalMDV2 gets the original markdownV2 formatting of a message text. -func (m Message) OriginalMDV2() string { - return getOrigMsgMDV2(utf16.Encode([]rune(m.Text)), m.Entities) -} - -// OriginalHTML gets the original HTML formatting of a message text. -func (m Message) OriginalHTML() string { - return getOrigMsgHTML(utf16.Encode([]rune(m.Text)), m.Entities) -} - -// OriginalCaptionMD gets the original markdown formatting of a message caption. -func (m Message) OriginalCaptionMD() string { - return getOrigMsgMD(utf16.Encode([]rune(m.Caption)), m.CaptionEntities) -} - -// OriginalCaptionMDV2 gets the original markdownV2 formatting of a message caption. -func (m Message) OriginalCaptionMDV2() string { - return getOrigMsgMDV2(utf16.Encode([]rune(m.Caption)), m.CaptionEntities) -} - -// OriginalCaptionHTML gets the original HTML formatting of a message caption. -func (m Message) OriginalCaptionHTML() string { - return getOrigMsgHTML(utf16.Encode([]rune(m.Caption)), m.CaptionEntities) -} - -// Does not support nesting; only looks at upper entities.
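As a usage sketch for the Original* helpers above: re-sending a received message with its formatting intact. The `quoteFormatted` function is hypothetical, and assumes SendMessageOpts exposes a ParseMode field, as the generated option types in this library do:

```go
package main

import (
	"github.com/PaulSonOfLars/gotgbot/v2"
)

// quoteFormatted re-sends a received message with its original HTML formatting
// preserved, by reconstructing the markup from the message entities.
func quoteFormatted(b *gotgbot.Bot, msg *gotgbot.Message) error {
	_, err := b.SendMessage(msg.Chat.Id, msg.OriginalHTML(), &gotgbot.SendMessageOpts{
		ParseMode: gotgbot.ParseModeHTML,
	})
	return err
}
```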
-func getOrigMsgMD(utf16Data []uint16, ents []MessageEntity) string { - out := strings.Builder{} - prev := int64(0) - for _, ent := range getUpperEntities(ents) { - newPrev := ent.Offset + ent.Length - prevText := string(utf16.Decode(utf16Data[prev:ent.Offset])) - - text := utf16.Decode(utf16Data[ent.Offset:newPrev]) - pre, cleanCntnt, post := splitEdgeWhitespace(string(text)) - cleanCntntRune := []rune(cleanCntnt) - - switch ent.Type { - case "bold", "italic", "code": - out.WriteString(prevText + pre + mdMap[ent.Type] + escapeContainedMDV1(cleanCntntRune, []rune(mdMap[ent.Type])) + mdMap[ent.Type] + post) - case "text_mention": - out.WriteString(prevText + pre + "[" + escapeContainedMDV1(cleanCntntRune, []rune("[]()")) + "](tg://user?id=" + strconv.FormatInt(ent.User.Id, 10) + ")" + post) - case "text_link": - out.WriteString(prevText + pre + "[" + escapeContainedMDV1(cleanCntntRune, []rune("[]()")) + "](" + ent.Url + ")" + post) - default: - continue - } - prev = newPrev - } - - out.WriteString(string(utf16.Decode(utf16Data[prev:]))) - return out.String() -} - -func getOrigMsgHTML(utf16Data []uint16, ents []MessageEntity) string { - if len(ents) == 0 { - return html.EscapeString(string(utf16.Decode(utf16Data))) - } - - bd := strings.Builder{} - prev := int64(0) - for _, e := range getUpperEntities(ents) { - data, end := fillNestedHTML(utf16Data, e, prev, getChildEntities(e, ents)) - bd.WriteString(data) - prev = end - } - - bd.WriteString(html.EscapeString(string(utf16.Decode(utf16Data[prev:])))) - return bd.String() -} - -func getOrigMsgMDV2(utf16Data []uint16, ents []MessageEntity) string { - if len(ents) == 0 { - return string(utf16.Decode(utf16Data)) - } - - bd := strings.Builder{} - prev := int64(0) - for _, e := range getUpperEntities(ents) { - data, end := fillNestedMarkdownV2(utf16Data, e, prev, getChildEntities(e, ents)) - bd.WriteString(data) - prev = end - } - - bd.WriteString(string(utf16.Decode(utf16Data[prev:]))) - return bd.String() -} - -func fillNestedHTML(data []uint16, ent MessageEntity, start int64, entities []MessageEntity) (string, int64) { - entEnd := ent.Offset + ent.Length - if len(entities) == 0 || entEnd < entities[0].Offset { - // no nesting; just return straight away and move to next. - return writeFinalHTML(data, ent, start, html.EscapeString(string(utf16.Decode(data[ent.Offset:entEnd])))), entEnd - } - subPrev := ent.Offset - subEnd := ent.Offset - bd := strings.Builder{} - for _, e := range getUpperEntities(entities) { - if e.Offset < subEnd || e == ent { - continue - } - if e.Offset >= entEnd { - break - } - - out, end := fillNestedHTML(data, e, subPrev, getChildEntities(e, entities)) - bd.WriteString(out) - subPrev = end - } - - bd.WriteString(html.EscapeString(string(utf16.Decode(data[subPrev:entEnd])))) - - return writeFinalHTML(data, ent, start, bd.String()), entEnd -} - -func fillNestedMarkdownV2(data []uint16, ent MessageEntity, start int64, entities []MessageEntity) (string, int64) { - entEnd := ent.Offset + ent.Length - if len(entities) == 0 || entEnd < entities[0].Offset { - // no nesting; just return straight away and move to next. 
- return writeFinalMarkdownV2(data, ent, start, string(utf16.Decode(data[ent.Offset:entEnd]))), entEnd - } - subPrev := ent.Offset - subEnd := ent.Offset - bd := strings.Builder{} - for _, e := range getUpperEntities(entities) { - if e.Offset < subEnd || e == ent { - continue - } - if e.Offset >= entEnd { - break - } - - out, end := fillNestedMarkdownV2(data, e, subPrev, getChildEntities(e, entities)) - bd.WriteString(out) - subPrev = end - } - - bd.WriteString(string(utf16.Decode(data[subPrev:entEnd]))) - - return writeFinalMarkdownV2(data, ent, start, bd.String()), entEnd -} - -func writeFinalHTML(data []uint16, ent MessageEntity, start int64, cntnt string) string { - prevText := html.EscapeString(string(utf16.Decode(data[start:ent.Offset]))) - switch ent.Type { - case "bold", "italic", "code", "underline", "strikethrough", "spoiler": - return prevText + "<" + htmlMap[ent.Type] + ">" + cntnt + "</" + closeHTMLTag(htmlMap[ent.Type]) + ">" - case "pre": - // <pre>text</pre> - if ent.Language == "" { - return prevText + "<pre>" + cntnt + "</pre>" - } - // <pre><code class="language-go">text</code></pre> - return prevText + `<pre><code class="language-` + ent.Language + `">` + cntnt + "</code></pre>" - case "text_mention": - return prevText + `<a href="tg://user?id=` + strconv.FormatInt(ent.User.Id, 10) + `">` + cntnt + "</a>" - case "text_link": - return prevText + `<a href="` + ent.Url + `">` + cntnt + "</a>" - default: - return prevText + cntnt - } -} - -// closeHTMLTag makes sure to generate the correct HTML closing tag for a given opening tag. -func closeHTMLTag(s string) string { - if !strings.HasPrefix(s, "span") { - return s - } - return "span" -} - -func writeFinalMarkdownV2(data []uint16, ent MessageEntity, start int64, cntnt string) string { - prevText := string(utf16.Decode(data[start:ent.Offset])) - pre, cleanCntnt, post := splitEdgeWhitespace(cntnt) - switch ent.Type { - case "bold", "italic", "code", "underline", "strikethrough", "pre", "spoiler": - return prevText + pre + mdV2Map[ent.Type] + cleanCntnt + mdV2Map[ent.Type] + post - case "text_mention": - return prevText + pre + "[" + cleanCntnt + "](tg://user?id=" + strconv.FormatInt(ent.User.Id, 10) + ")" + post - case "text_link": - return prevText + pre + "[" + cleanCntnt + "](" + ent.Url + ")" + post - default: - return prevText + cntnt - } -} - -func getUpperEntities(ents []MessageEntity) []MessageEntity { - prev := int64(0) - uppers := make([]MessageEntity, 0, len(ents)) - for _, e := range ents { - if e.Offset < prev { - continue - } - uppers = append(uppers, e) - prev = e.Offset + e.Length - } - return uppers -} - -func getChildEntities(ent MessageEntity, ents []MessageEntity) []MessageEntity { - end := ent.Offset + ent.Length - children := make([]MessageEntity, 0, len(ents)) - for _, e := range ents { - if e.Offset < ent.Offset || e == ent { - continue - } - if e.Offset >= end { - break - } - children = append(children, e) - } - return children -} - -func splitEdgeWhitespace(text string) (pre string, cntnt string, post string) { - bd := strings.Builder{} - rText := []rune(text) - for i := 0; i < len(rText) && unicode.IsSpace(rText[i]); i++ { - bd.WriteRune(rText[i]) - } - pre = bd.String() - text = strings.TrimPrefix(text, pre) - - bd.Reset() - for i := len(rText) - 1; i >= 0 && unicode.IsSpace(rText[i]); i-- { - bd.WriteRune(rText[i]) - } - post = bd.String() - return pre, strings.TrimSuffix(text, post), post -} - -func escapeContainedMDV1(data []rune, mdType []rune) string { - out := strings.Builder{} - for _, x := range data { - if contains(x, mdType) { - out.WriteRune('\\') - } - out.WriteRune(x) - } - return out.String() -} - -func contains(r rune, rs []rune) bool { - for _, rr := range rs { - if r == rr { - return true - } - } - return false -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_consts.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_consts.go deleted file mode 100644 index a64238a..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_consts.go +++ /dev/null @@ -1,37 +0,0 @@ -// THIS FILE IS AUTOGENERATED. DO NOT EDIT. -// Regen by running 'go generate' in the repo root. - -package gotgbot - -// The consts listed below represent all the update types that can be requested from telegram.
-const ( - UpdateTypeMessage = "message" - UpdateTypeEditedMessage = "edited_message" - UpdateTypeChannelPost = "channel_post" - UpdateTypeEditedChannelPost = "edited_channel_post" - UpdateTypeInlineQuery = "inline_query" - UpdateTypeChosenInlineResult = "chosen_inline_result" - UpdateTypeCallbackQuery = "callback_query" - UpdateTypeShippingQuery = "shipping_query" - UpdateTypePreCheckoutQuery = "pre_checkout_query" - UpdateTypePoll = "poll" - UpdateTypePollAnswer = "poll_answer" - UpdateTypeMyChatMember = "my_chat_member" - UpdateTypeChatMember = "chat_member" - UpdateTypeChatJoinRequest = "chat_join_request" -) - -// The consts listed below represent all the parse_mode options that can be sent to telegram. -const ( - ParseModeHTML = "HTML" - ParseModeMarkdownV2 = "MarkdownV2" - ParseModeMarkdown = "Markdown" - ParseModeNone = "" -) - -// The consts listed below represent all the sticker types that can be obtained from telegram. -const ( - StickerTypeRegular = "regular" - StickerTypeMask = "mask" - StickerTypeCustomEmoji = "custom_emoji" -) diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_helpers.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_helpers.go deleted file mode 100644 index 168584d..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_helpers.go +++ /dev/null @@ -1,334 +0,0 @@ -// THIS FILE IS AUTOGENERATED. DO NOT EDIT. -// Regen by running 'go generate' in the repo root. - -package gotgbot - -// Answer Helper method for Bot.AnswerCallbackQuery -func (cq CallbackQuery) Answer(b *Bot, opts *AnswerCallbackQueryOpts) (bool, error) { - return b.AnswerCallbackQuery(cq.Id, opts) -} - -// Answer Helper method for Bot.AnswerInlineQuery -func (iq InlineQuery) Answer(b *Bot, results []InlineQueryResult, opts *AnswerInlineQueryOpts) (bool, error) { - return b.AnswerInlineQuery(iq.Id, results, opts) -} - -// Answer Helper method for Bot.AnswerPreCheckoutQuery -func (pcq PreCheckoutQuery) Answer(b *Bot, ok bool, opts *AnswerPreCheckoutQueryOpts) (bool, error) { - return b.AnswerPreCheckoutQuery(pcq.Id, ok, opts) -} - -// Answer Helper method for Bot.AnswerShippingQuery -func (sq ShippingQuery) Answer(b *Bot, ok bool, opts *AnswerShippingQueryOpts) (bool, error) { - return b.AnswerShippingQuery(sq.Id, ok, opts) -} - -// ApproveJoinRequest Helper method for Bot.ApproveChatJoinRequest -func (c Chat) ApproveJoinRequest(b *Bot, userId int64, opts *ApproveChatJoinRequestOpts) (bool, error) { - return b.ApproveChatJoinRequest(c.Id, userId, opts) -} - -// BanMember Helper method for Bot.BanChatMember -func (c Chat) BanMember(b *Bot, userId int64, opts *BanChatMemberOpts) (bool, error) { - return b.BanChatMember(c.Id, userId, opts) -} - -// BanSenderChat Helper method for Bot.BanChatSenderChat -func (c Chat) BanSenderChat(b *Bot, senderChatId int64, opts *BanChatSenderChatOpts) (bool, error) { - return b.BanChatSenderChat(c.Id, senderChatId, opts) -} - -// Copy Helper method for Bot.CopyMessage -func (m Message) Copy(b *Bot, chatId int64, opts *CopyMessageOpts) (*MessageId, error) { - return b.CopyMessage(chatId, m.Chat.Id, m.MessageId, opts) -} - -// CreateInviteLink Helper method for Bot.CreateChatInviteLink -func (c Chat) CreateInviteLink(b *Bot, opts *CreateChatInviteLinkOpts) (*ChatInviteLink, error) { - return b.CreateChatInviteLink(c.Id, opts) -} - -// DeclineJoinRequest Helper method for Bot.DeclineChatJoinRequest -func (c Chat) DeclineJoinRequest(b *Bot, userId int64, opts *DeclineChatJoinRequestOpts) (bool, error) { - return b.DeclineChatJoinRequest(c.Id, userId, 
opts) -} - -// DeletePhoto Helper method for Bot.DeleteChatPhoto -func (c Chat) DeletePhoto(b *Bot, opts *DeleteChatPhotoOpts) (bool, error) { - return b.DeleteChatPhoto(c.Id, opts) -} - -// DeleteStickerSet Helper method for Bot.DeleteChatStickerSet -func (c Chat) DeleteStickerSet(b *Bot, opts *DeleteChatStickerSetOpts) (bool, error) { - return b.DeleteChatStickerSet(c.Id, opts) -} - -// Delete Helper method for Bot.DeleteMessage -func (m Message) Delete(b *Bot, opts *DeleteMessageOpts) (bool, error) { - return b.DeleteMessage(m.Chat.Id, m.MessageId, opts) -} - -// EditInviteLink Helper method for Bot.EditChatInviteLink -func (c Chat) EditInviteLink(b *Bot, inviteLink string, opts *EditChatInviteLinkOpts) (*ChatInviteLink, error) { - return b.EditChatInviteLink(c.Id, inviteLink, opts) -} - -// EditCaption Helper method for Bot.EditMessageCaption -func (m Message) EditCaption(b *Bot, opts *EditMessageCaptionOpts) (*Message, bool, error) { - if opts == nil { - opts = &EditMessageCaptionOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.EditMessageCaption(opts) -} - -// EditLiveLocation Helper method for Bot.EditMessageLiveLocation -func (m Message) EditLiveLocation(b *Bot, latitude float64, longitude float64, opts *EditMessageLiveLocationOpts) (*Message, bool, error) { - if opts == nil { - opts = &EditMessageLiveLocationOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.EditMessageLiveLocation(latitude, longitude, opts) -} - -// EditMedia Helper method for Bot.EditMessageMedia -func (m Message) EditMedia(b *Bot, media InputMedia, opts *EditMessageMediaOpts) (*Message, bool, error) { - if opts == nil { - opts = &EditMessageMediaOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.EditMessageMedia(media, opts) -} - -// EditReplyMarkup Helper method for Bot.EditMessageReplyMarkup -func (m Message) EditReplyMarkup(b *Bot, opts *EditMessageReplyMarkupOpts) (*Message, bool, error) { - if opts == nil { - opts = &EditMessageReplyMarkupOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.EditMessageReplyMarkup(opts) -} - -// EditText Helper method for Bot.EditMessageText -func (m Message) EditText(b *Bot, text string, opts *EditMessageTextOpts) (*Message, bool, error) { - if opts == nil { - opts = &EditMessageTextOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.EditMessageText(text, opts) -} - -// ExportInviteLink Helper method for Bot.ExportChatInviteLink -func (c Chat) ExportInviteLink(b *Bot, opts *ExportChatInviteLinkOpts) (string, error) { - return b.ExportChatInviteLink(c.Id, opts) -} - -// Forward Helper method for Bot.ForwardMessage -func (m Message) Forward(b *Bot, chatId int64, opts *ForwardMessageOpts) (*Message, error) { - return b.ForwardMessage(chatId, m.Chat.Id, m.MessageId, opts) -} - -// Get Helper method for Bot.GetChat -func (c Chat) Get(b *Bot, opts *GetChatOpts) (*Chat, error) { - return b.GetChat(c.Id, opts) -} - -// GetAdministrators Helper method for Bot.GetChatAdministrators -func (c Chat) GetAdministrators(b *Bot, opts *GetChatAdministratorsOpts) ([]ChatMember, error) { - return b.GetChatAdministrators(c.Id, opts) -} - -// 
GetMember Helper method for Bot.GetChatMember -func (c Chat) GetMember(b *Bot, userId int64, opts *GetChatMemberOpts) (ChatMember, error) { - return b.GetChatMember(c.Id, userId, opts) -} - -// GetMemberCount Helper method for Bot.GetChatMemberCount -func (c Chat) GetMemberCount(b *Bot, opts *GetChatMemberCountOpts) (int64, error) { - return b.GetChatMemberCount(c.Id, opts) -} - -// GetMenuButton Helper method for Bot.GetChatMenuButton -func (c Chat) GetMenuButton(b *Bot, opts *GetChatMenuButtonOpts) (MenuButton, error) { - if opts == nil { - opts = &GetChatMenuButtonOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = c.Id - } - - return b.GetChatMenuButton(opts) -} - -// Get Helper method for Bot.GetFile -func (f File) Get(b *Bot, opts *GetFileOpts) (*File, error) { - return b.GetFile(f.FileId, opts) -} - -// GetProfilePhotos Helper method for Bot.GetUserProfilePhotos -func (u User) GetProfilePhotos(b *Bot, opts *GetUserProfilePhotosOpts) (*UserProfilePhotos, error) { - return b.GetUserProfilePhotos(u.Id, opts) -} - -// Leave Helper method for Bot.LeaveChat -func (c Chat) Leave(b *Bot, opts *LeaveChatOpts) (bool, error) { - return b.LeaveChat(c.Id, opts) -} - -// PinMessage Helper method for Bot.PinChatMessage -func (c Chat) PinMessage(b *Bot, messageId int64, opts *PinChatMessageOpts) (bool, error) { - return b.PinChatMessage(c.Id, messageId, opts) -} - -// Pin Helper method for Bot.PinChatMessage -func (m Message) Pin(b *Bot, opts *PinChatMessageOpts) (bool, error) { - return b.PinChatMessage(m.Chat.Id, m.MessageId, opts) -} - -// PromoteMember Helper method for Bot.PromoteChatMember -func (c Chat) PromoteMember(b *Bot, userId int64, opts *PromoteChatMemberOpts) (bool, error) { - return b.PromoteChatMember(c.Id, userId, opts) -} - -// RestrictMember Helper method for Bot.RestrictChatMember -func (c Chat) RestrictMember(b *Bot, userId int64, permissions ChatPermissions, opts *RestrictChatMemberOpts) (bool, error) { - return b.RestrictChatMember(c.Id, userId, permissions, opts) -} - -// RevokeInviteLink Helper method for Bot.RevokeChatInviteLink -func (c Chat) RevokeInviteLink(b *Bot, inviteLink string, opts *RevokeChatInviteLinkOpts) (*ChatInviteLink, error) { - return b.RevokeChatInviteLink(c.Id, inviteLink, opts) -} - -// SendAction Helper method for Bot.SendChatAction -func (c Chat) SendAction(b *Bot, action string, opts *SendChatActionOpts) (bool, error) { - return b.SendChatAction(c.Id, action, opts) -} - -// SetAdministratorCustomTitle Helper method for Bot.SetChatAdministratorCustomTitle -func (c Chat) SetAdministratorCustomTitle(b *Bot, userId int64, customTitle string, opts *SetChatAdministratorCustomTitleOpts) (bool, error) { - return b.SetChatAdministratorCustomTitle(c.Id, userId, customTitle, opts) -} - -// SetDescription Helper method for Bot.SetChatDescription -func (c Chat) SetDescription(b *Bot, opts *SetChatDescriptionOpts) (bool, error) { - return b.SetChatDescription(c.Id, opts) -} - -// SetMenuButton Helper method for Bot.SetChatMenuButton -func (c Chat) SetMenuButton(b *Bot, opts *SetChatMenuButtonOpts) (bool, error) { - if opts == nil { - opts = &SetChatMenuButtonOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = c.Id - } - - return b.SetChatMenuButton(opts) -} - -// SetPermissions Helper method for Bot.SetChatPermissions -func (c Chat) SetPermissions(b *Bot, permissions ChatPermissions, opts *SetChatPermissionsOpts) (bool, error) { - return b.SetChatPermissions(c.Id, permissions, opts) -} - -// SetPhoto Helper method for Bot.SetChatPhoto -func (c Chat) 
SetPhoto(b *Bot, photo InputFile, opts *SetChatPhotoOpts) (bool, error) { - return b.SetChatPhoto(c.Id, photo, opts) -} - -// SetStickerSet Helper method for Bot.SetChatStickerSet -func (c Chat) SetStickerSet(b *Bot, stickerSetName string, opts *SetChatStickerSetOpts) (bool, error) { - return b.SetChatStickerSet(c.Id, stickerSetName, opts) -} - -// SetTitle Helper method for Bot.SetChatTitle -func (c Chat) SetTitle(b *Bot, title string, opts *SetChatTitleOpts) (bool, error) { - return b.SetChatTitle(c.Id, title, opts) -} - -// StopLiveLocation Helper method for Bot.StopMessageLiveLocation -func (m Message) StopLiveLocation(b *Bot, opts *StopMessageLiveLocationOpts) (*Message, bool, error) { - if opts == nil { - opts = &StopMessageLiveLocationOpts{} - } - - if opts.ChatId == 0 { - opts.ChatId = m.Chat.Id - } - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.StopMessageLiveLocation(opts) -} - -// UnbanMember Helper method for Bot.UnbanChatMember -func (c Chat) UnbanMember(b *Bot, userId int64, opts *UnbanChatMemberOpts) (bool, error) { - return b.UnbanChatMember(c.Id, userId, opts) -} - -// UnbanSenderChat Helper method for Bot.UnbanChatSenderChat -func (c Chat) UnbanSenderChat(b *Bot, senderChatId int64, opts *UnbanChatSenderChatOpts) (bool, error) { - return b.UnbanChatSenderChat(c.Id, senderChatId, opts) -} - -// UnpinAllMessages Helper method for Bot.UnpinAllChatMessages -func (c Chat) UnpinAllMessages(b *Bot, opts *UnpinAllChatMessagesOpts) (bool, error) { - return b.UnpinAllChatMessages(c.Id, opts) -} - -// UnpinMessage Helper method for Bot.UnpinChatMessage -func (c Chat) UnpinMessage(b *Bot, opts *UnpinChatMessageOpts) (bool, error) { - return b.UnpinChatMessage(c.Id, opts) -} - -// Unpin Helper method for Bot.UnpinChatMessage -func (m Message) Unpin(b *Bot, opts *UnpinChatMessageOpts) (bool, error) { - if opts == nil { - opts = &UnpinChatMessageOpts{} - } - - if opts.MessageId == 0 { - opts.MessageId = m.MessageId - } - - return b.UnpinChatMessage(m.Chat.Id, opts) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_methods.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_methods.go deleted file mode 100644 index f778e0e..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_methods.go +++ /dev/null @@ -1,4912 +0,0 @@ -// THIS FILE IS AUTOGENERATED. DO NOT EDIT. -// Regen by running 'go generate' in the repo root. - -package gotgbot - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strconv" -) - -// AddStickerToSetOpts is the set of optional fields for Bot.AddStickerToSet. -type AddStickerToSetOpts struct { - // PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - PngSticker InputFile - // TGS animation with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#animated-sticker-requirements for technical requirements - TgsSticker InputFile - // WEBM video with the sticker, uploaded using multipart/form-data. 
See https://core.telegram.org/stickers#video-sticker-requirements for technical requirements - WebmSticker InputFile - // A JSON-serialized object for position where the mask should be placed on faces - MaskPosition MaskPosition - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AddStickerToSet Use this method to add a new sticker to a set created by the bot. You must use exactly one of the fields png_sticker, tgs_sticker, or webm_sticker. Animated stickers can be added to animated sticker sets and only to them. Animated sticker sets can have up to 50 stickers. Static sticker sets can have up to 120 stickers. Returns True on success. -// - userId (type int64): User identifier of sticker set owner -// - name (type string): Sticker set name -// - emojis (type string): One or more emoji corresponding to the sticker -// - opts (type AddStickerToSetOpts): All optional parameters. -// https://core.telegram.org/bots/api#addstickertoset -func (bot *Bot) AddStickerToSet(userId int64, name string, emojis string, opts *AddStickerToSetOpts) (bool, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["user_id"] = strconv.FormatInt(userId, 10) - v["name"] = name - v["emojis"] = emojis - if opts != nil { - if opts.PngSticker != nil { - switch m := opts.PngSticker.(type) { - case string: - v["png_sticker"] = m - - case NamedReader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = m - - case io.Reader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: m} - - case []byte: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.PngSticker) - } - } - if opts.TgsSticker != nil { - switch m := opts.TgsSticker.(type) { - case NamedReader: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = m - - case io.Reader: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = NamedFile{File: m} - - case []byte: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.TgsSticker) - } - } - if opts.WebmSticker != nil { - switch m := opts.WebmSticker.(type) { - case NamedReader: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = m - - case io.Reader: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = NamedFile{File: m} - - case []byte: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.WebmSticker) - } - } - bs, err := json.Marshal(opts.MaskPosition) - if err != nil { - return false, fmt.Errorf("failed to marshal field mask_position: %w", err) - } - v["mask_position"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("addStickerToSet", v, data, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// AnswerCallbackQueryOpts is the set of optional fields for Bot.AnswerCallbackQuery. -type AnswerCallbackQueryOpts struct { - // Text of the notification. 
If not specified, nothing will be shown to the user, 0-200 characters - Text string - // If True, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to false. - ShowAlert bool - // URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @BotFather, specify the URL that opens your game - note that this will only work if the query comes from a callback_game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. - Url string - // The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0. - CacheTime int64 - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AnswerCallbackQuery Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. On success, True is returned. -// - callbackQueryId (type string): Unique identifier for the query to be answered -// - opts (type AnswerCallbackQueryOpts): All optional parameters. -// https://core.telegram.org/bots/api#answercallbackquery -func (bot *Bot) AnswerCallbackQuery(callbackQueryId string, opts *AnswerCallbackQueryOpts) (bool, error) { - v := map[string]string{} - v["callback_query_id"] = callbackQueryId - if opts != nil { - v["text"] = opts.Text - v["show_alert"] = strconv.FormatBool(opts.ShowAlert) - v["url"] = opts.Url - if opts.CacheTime != 0 { - v["cache_time"] = strconv.FormatInt(opts.CacheTime, 10) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("answerCallbackQuery", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// AnswerInlineQueryOpts is the set of optional fields for Bot.AnswerInlineQuery. -type AnswerInlineQueryOpts struct { - // The maximum amount of time in seconds that the result of the inline query may be cached on the server. Defaults to 300. - CacheTime int64 - // Pass True if results may be cached on the server side only for the user that sent the query. By default, results may be returned to any user who sends the same query - IsPersonal bool - // Pass the offset that a client should send in the next query with the same text to receive more results. Pass an empty string if there are no more results or if you don't support pagination. Offset length can't exceed 64 bytes. - NextOffset string - // If passed, clients will display a button with specified text that switches the user to a private chat with the bot and sends the bot a start message with the parameter switch_pm_parameter - SwitchPmText string - // Deep-linking parameter for the /start message sent to the bot when user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed. Example: An inline bot that sends YouTube videos can ask the user to connect the bot to their YouTube account to adapt search results accordingly. To do this, it displays a 'Connect your YouTube account' button above the results, or even before showing any. The user presses the button, switches to a private chat with the bot and, in doing so, passes a start parameter that instructs the bot to return an OAuth link. 
Once done, the bot can offer a switch_inline button so that the user can easily return to the chat where they wanted to use the bot's inline capabilities. - SwitchPmParameter string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AnswerInlineQuery Use this method to send answers to an inline query. On success, True is returned. -// No more than 50 results per query are allowed. -// - inlineQueryId (type string): Unique identifier for the answered query -// - results (type []InlineQueryResult): A JSON-serialized array of results for the inline query -// - opts (type AnswerInlineQueryOpts): All optional parameters. -// https://core.telegram.org/bots/api#answerinlinequery -func (bot *Bot) AnswerInlineQuery(inlineQueryId string, results []InlineQueryResult, opts *AnswerInlineQueryOpts) (bool, error) { - v := map[string]string{} - v["inline_query_id"] = inlineQueryId - if results != nil { - bs, err := json.Marshal(results) - if err != nil { - return false, fmt.Errorf("failed to marshal field results: %w", err) - } - v["results"] = string(bs) - } - if opts != nil { - if opts.CacheTime != 0 { - v["cache_time"] = strconv.FormatInt(opts.CacheTime, 10) - } - v["is_personal"] = strconv.FormatBool(opts.IsPersonal) - v["next_offset"] = opts.NextOffset - v["switch_pm_text"] = opts.SwitchPmText - v["switch_pm_parameter"] = opts.SwitchPmParameter - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("answerInlineQuery", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// AnswerPreCheckoutQueryOpts is the set of optional fields for Bot.AnswerPreCheckoutQuery. -type AnswerPreCheckoutQueryOpts struct { - // Required if ok is False. Error message in human readable form that explains the reason for failure to proceed with the checkout (e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling out your payment details. Please choose a different color or garment!"). Telegram will display this message to the user. - ErrorMessage string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AnswerPreCheckoutQuery Once the user has confirmed their payment and shipping details, the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query. Use this method to respond to such pre-checkout queries. On success, True is returned. Note: The Bot API must receive an answer within 10 seconds after the pre-checkout query was sent. -// - preCheckoutQueryId (type string): Unique identifier for the query to be answered -// - ok (type bool): Specify True if everything is alright (goods are available, etc.) and the bot is ready to proceed with the order. Use False if there are any problems. -// - opts (type AnswerPreCheckoutQueryOpts): All optional parameters. 
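A minimal usage sketch for the AnswerInlineQuery method above, using the InlineQuery.Answer wrapper from gen_helpers.go; the InlineQueryResultArticle and InputTextMessageContent type names are assumed from the removed gen_types.go:

// Sketch only. Assumes: import "github.com/PaulSonOfLars/gotgbot/v2" (package gotgbot).
func answerQuery(b *gotgbot.Bot, iq gotgbot.InlineQuery) error {
    // Result types are assumed from the removed gen_types.go.
    results := []gotgbot.InlineQueryResult{
        gotgbot.InlineQueryResultArticle{
            Id:                  "1",
            Title:               "Hello",
            InputMessageContent: gotgbot.InputTextMessageContent{MessageText: "Hello!"},
        },
    }
    // CacheTime of 300 matches the documented server-side default.
    _, err := iq.Answer(b, results, &gotgbot.AnswerInlineQueryOpts{CacheTime: 300})
    return err
}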
-// https://core.telegram.org/bots/api#answerprecheckoutquery -func (bot *Bot) AnswerPreCheckoutQuery(preCheckoutQueryId string, ok bool, opts *AnswerPreCheckoutQueryOpts) (bool, error) { - v := map[string]string{} - v["pre_checkout_query_id"] = preCheckoutQueryId - v["ok"] = strconv.FormatBool(ok) - if opts != nil { - v["error_message"] = opts.ErrorMessage - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("answerPreCheckoutQuery", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// AnswerShippingQueryOpts is the set of optional fields for Bot.AnswerShippingQuery. -type AnswerShippingQueryOpts struct { - // Required if ok is True. A JSON-serialized array of available shipping options. - ShippingOptions []ShippingOption - // Required if ok is False. Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable'). Telegram will display this message to the user. - ErrorMessage string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AnswerShippingQuery If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Use this method to reply to shipping queries. On success, True is returned. -// - shippingQueryId (type string): Unique identifier for the query to be answered -// - ok (type bool): Pass True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible) -// - opts (type AnswerShippingQueryOpts): All optional parameters. -// https://core.telegram.org/bots/api#answershippingquery -func (bot *Bot) AnswerShippingQuery(shippingQueryId string, ok bool, opts *AnswerShippingQueryOpts) (bool, error) { - v := map[string]string{} - v["shipping_query_id"] = shippingQueryId - v["ok"] = strconv.FormatBool(ok) - if opts != nil { - if opts.ShippingOptions != nil { - bs, err := json.Marshal(opts.ShippingOptions) - if err != nil { - return false, fmt.Errorf("failed to marshal field shipping_options: %w", err) - } - v["shipping_options"] = string(bs) - } - v["error_message"] = opts.ErrorMessage - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("answerShippingQuery", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// AnswerWebAppQueryOpts is the set of optional fields for Bot.AnswerWebAppQuery. -type AnswerWebAppQueryOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// AnswerWebAppQuery Use this method to set the result of an interaction with a Web App and send a corresponding message on behalf of the user to the chat from which the query originated. On success, a SentWebAppMessage object is returned. 
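A sketch of the ok/error_message contract for shipping queries above, using only the signatures shown here (ShippingQuery.Answer is the gen_helpers.go wrapper):

// Sketch only; assumes the gotgbot/v2 import above.
func rejectShipping(b *gotgbot.Bot, sq gotgbot.ShippingQuery) error {
    // With ok=false, ErrorMessage is required and Telegram shows it to the user.
    _, err := sq.Answer(b, false, &gotgbot.AnswerShippingQueryOpts{
        ErrorMessage: "Sorry, delivery to your desired address is unavailable.",
    })
    return err
}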
-// - webAppQueryId (type string): Unique identifier for the query to be answered -// - result (type InlineQueryResult): A JSON-serialized object describing the message to be sent -// https://core.telegram.org/bots/api#answerwebappquery -func (bot *Bot) AnswerWebAppQuery(webAppQueryId string, result InlineQueryResult, opts *AnswerWebAppQueryOpts) (*SentWebAppMessage, error) { - v := map[string]string{} - v["web_app_query_id"] = webAppQueryId - bs, err := json.Marshal(result) - if err != nil { - return nil, fmt.Errorf("failed to marshal field result: %w", err) - } - v["result"] = string(bs) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("answerWebAppQuery", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var s SentWebAppMessage - return &s, json.Unmarshal(r, &s) -} - -// ApproveChatJoinRequestOpts is the set of optional fields for Bot.ApproveChatJoinRequest. -type ApproveChatJoinRequestOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// ApproveChatJoinRequest Use this method to approve a chat join request. The bot must be an administrator in the chat for this to work and must have the can_invite_users administrator right. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// https://core.telegram.org/bots/api#approvechatjoinrequest -func (bot *Bot) ApproveChatJoinRequest(chatId int64, userId int64, opts *ApproveChatJoinRequestOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("approveChatJoinRequest", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// BanChatMemberOpts is the set of optional fields for Bot.BanChatMember. -type BanChatMemberOpts struct { - // Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever. Applied for supergroups and channels only. - UntilDate int64 - // Pass True to delete all messages from the chat for the user that is being removed. If False, the user will be able to see messages in the group that were sent before the user was removed. Always True for supergroups and channels. - RevokeMessages bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// BanChatMember Use this method to ban a user in a group, a supergroup or a channel. In the case of supergroups and channels, the user will not be able to return to the chat on their own using invite links, etc., unless unbanned first. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target group or username of the target supergroup or channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// - opts (type BanChatMemberOpts): All optional parameters. 
-// https://core.telegram.org/bots/api#banchatmember -func (bot *Bot) BanChatMember(chatId int64, userId int64, opts *BanChatMemberOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - if opts != nil { - if opts.UntilDate != 0 { - v["until_date"] = strconv.FormatInt(opts.UntilDate, 10) - } - v["revoke_messages"] = strconv.FormatBool(opts.RevokeMessages) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("banChatMember", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// BanChatSenderChatOpts is the set of optional fields for Bot.BanChatSenderChat. -type BanChatSenderChatOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// BanChatSenderChat Use this method to ban a channel chat in a supergroup or a channel. Until the chat is unbanned, the owner of the banned chat won't be able to send messages on behalf of any of their channels. The bot must be an administrator in the supergroup or channel for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - senderChatId (type int64): Unique identifier of the target sender chat -// https://core.telegram.org/bots/api#banchatsenderchat -func (bot *Bot) BanChatSenderChat(chatId int64, senderChatId int64, opts *BanChatSenderChatOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["sender_chat_id"] = strconv.FormatInt(senderChatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("banChatSenderChat", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// CloseOpts is the set of optional fields for Bot.Close. -type CloseOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// Close Use this method to close the bot instance before moving it from one local server to another. You need to delete the webhook before calling this method to ensure that the bot isn't launched again after server restart. The method will return error 429 in the first 10 minutes after the bot is launched. Returns True on success. Requires no parameters. -// https://core.telegram.org/bots/api#close -func (bot *Bot) Close(opts *CloseOpts) (bool, error) { - v := map[string]string{} - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("close", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// CloseForumTopicOpts is the set of optional fields for Bot.CloseForumTopic. -type CloseForumTopicOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CloseForumTopic Use this method to close an open topic in a forum supergroup chat. The bot must be an administrator in the chat for this to work and must have the can_manage_topics administrator rights, unless it is the creator of the topic. Returns True on success. 
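To make the until_date semantics of BanChatMember above concrete (a ban shorter than 30 seconds or longer than 366 days is treated as permanent), a sketch of a one-day ban via the Chat.BanMember helper:

// Sketch only; assumes: import "time" plus the gotgbot/v2 import above.
func banForADay(b *gotgbot.Bot, chat gotgbot.Chat, userId int64) error {
    _, err := chat.BanMember(b, userId, &gotgbot.BanChatMemberOpts{
        UntilDate:      time.Now().Add(24 * time.Hour).Unix(), // unix time, per the field docs
        RevokeMessages: true,                                  // also delete the user's past messages
    })
    return err
}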
-// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - messageThreadId (type int64): Unique identifier for the target message thread of the forum topic -// https://core.telegram.org/bots/api#closeforumtopic -func (bot *Bot) CloseForumTopic(chatId int64, messageThreadId int64, opts *CloseForumTopicOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_thread_id"] = strconv.FormatInt(messageThreadId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("closeForumTopic", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// CopyMessageOpts is the set of optional fields for Bot.CopyMessage. -type CopyMessageOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // New caption for media, 0-1024 characters after entities parsing. If not specified, the original caption is kept - Caption string - // Mode for parsing entities in the new caption. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in the new caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CopyMessage Use this method to copy messages of any kind. Service messages and invoice messages can't be copied. A quiz poll can be copied only if the value of the field correct_option_id is known to the bot. The method is analogous to the method forwardMessage, but the copied message doesn't have a link to the original message. Returns the MessageId of the sent message on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - fromChatId (type int64): Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername) -// - messageId (type int64): Message identifier in the chat specified in from_chat_id -// - opts (type CopyMessageOpts): All optional parameters. 
-// https://core.telegram.org/bots/api#copymessage -func (bot *Bot) CopyMessage(chatId int64, fromChatId int64, messageId int64, opts *CopyMessageOpts) (*MessageId, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["from_chat_id"] = strconv.FormatInt(fromChatId, 10) - v["message_id"] = strconv.FormatInt(messageId, 10) - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("copyMessage", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m MessageId - return &m, json.Unmarshal(r, &m) -} - -// CreateChatInviteLinkOpts is the set of optional fields for Bot.CreateChatInviteLink. -type CreateChatInviteLinkOpts struct { - // Invite link name; 0-32 characters - Name string - // Point in time (Unix timestamp) when the link will expire - ExpireDate int64 - // The maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999 - MemberLimit int64 - // True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified - CreatesJoinRequest bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CreateChatInviteLink Use this method to create an additional invite link for a chat. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. The link can be revoked using the method revokeChatInviteLink. Returns the new invite link as ChatInviteLink object. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - opts (type CreateChatInviteLinkOpts): All optional parameters. 
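As a usage sketch for CopyMessage above, copying a message silently through the Message.Copy helper; per the docs, the copy carries no link back to the original:

// Sketch only; assumes the gotgbot/v2 import above.
func copyTo(b *gotgbot.Bot, m gotgbot.Message, targetChatId int64) (*gotgbot.MessageId, error) {
    return m.Copy(b, targetChatId, &gotgbot.CopyMessageOpts{
        DisableNotification: true, // deliver without a notification sound
    })
}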
-// https://core.telegram.org/bots/api#createchatinvitelink -func (bot *Bot) CreateChatInviteLink(chatId int64, opts *CreateChatInviteLinkOpts) (*ChatInviteLink, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if opts != nil { - v["name"] = opts.Name - if opts.ExpireDate != 0 { - v["expire_date"] = strconv.FormatInt(opts.ExpireDate, 10) - } - if opts.MemberLimit != 0 { - v["member_limit"] = strconv.FormatInt(opts.MemberLimit, 10) - } - v["creates_join_request"] = strconv.FormatBool(opts.CreatesJoinRequest) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("createChatInviteLink", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var c ChatInviteLink - return &c, json.Unmarshal(r, &c) -} - -// CreateForumTopicOpts is the set of optional fields for Bot.CreateForumTopic. -type CreateForumTopicOpts struct { - // Color of the topic icon in RGB format. Currently, must be one of 7322096 (0x6FB9F0), 16766590 (0xFFD67E), 13338331 (0xCB86DB), 9367192 (0x8EEE98), 16749490 (0xFF93B2), or 16478047 (0xFB6F5F) - IconColor int64 - // Unique identifier of the custom emoji shown as the topic icon. Use getForumTopicIconStickers to get all allowed custom emoji identifiers. - IconCustomEmojiId string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CreateForumTopic Use this method to create a topic in a forum supergroup chat. The bot must be an administrator in the chat for this to work and must have the can_manage_topics administrator rights. Returns information about the created topic as a ForumTopic object. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - name (type string): Topic name, 1-128 characters -// - opts (type CreateForumTopicOpts): All optional parameters. -// https://core.telegram.org/bots/api#createforumtopic -func (bot *Bot) CreateForumTopic(chatId int64, name string, opts *CreateForumTopicOpts) (*ForumTopic, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["name"] = name - if opts != nil { - if opts.IconColor != 0 { - v["icon_color"] = strconv.FormatInt(opts.IconColor, 10) - } - v["icon_custom_emoji_id"] = opts.IconCustomEmojiId - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("createForumTopic", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var f ForumTopic - return &f, json.Unmarshal(r, &f) -} - -// CreateInvoiceLinkOpts is the set of optional fields for Bot.CreateInvoiceLink. -type CreateInvoiceLinkOpts struct { - // The maximum accepted amount for tips in the smallest units of the currency (integer, not float/double). For example, for a maximum tip of US$ 1.45 pass max_tip_amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). Defaults to 0 - MaxTipAmount int64 - // A JSON-serialized array of suggested amounts of tips in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount. - SuggestedTipAmounts []int64 - // JSON-serialized data about the invoice, which will be shared with the payment provider. 
A detailed description of required fields should be provided by the payment provider. - ProviderData string - // URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service. - PhotoUrl string - // Photo size in bytes - PhotoSize int64 - // Photo width - PhotoWidth int64 - // Photo height - PhotoHeight int64 - // Pass True if you require the user's full name to complete the order - NeedName bool - // Pass True if you require the user's phone number to complete the order - NeedPhoneNumber bool - // Pass True if you require the user's email address to complete the order - NeedEmail bool - // Pass True if you require the user's shipping address to complete the order - NeedShippingAddress bool - // Pass True if the user's phone number should be sent to the provider - SendPhoneNumberToProvider bool - // Pass True if the user's email address should be sent to the provider - SendEmailToProvider bool - // Pass True if the final price depends on the shipping method - IsFlexible bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CreateInvoiceLink Use this method to create a link for an invoice. Returns the created invoice link as String on success. -// - title (type string): Product name, 1-32 characters -// - description (type string): Product description, 1-255 characters -// - payload (type string): Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes. -// - providerToken (type string): Payment provider token, obtained via BotFather -// - currency (type string): Three-letter ISO 4217 currency code, see more on currencies -// - prices (type []LabeledPrice): Price breakdown, a JSON-serialized list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.) -// - opts (type CreateInvoiceLinkOpts): All optional parameters. 
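A sketch of the smallest-currency-unit pricing documented above; the LabeledPrice field names are assumed from the removed gen_types.go, and the provider token is a placeholder:

// Sketch only; assumes the gotgbot/v2 import above.
func invoiceLink(b *gotgbot.Bot, providerToken string) (string, error) {
    // Amounts are in the smallest units of the currency: 1099 means US$10.99.
    prices := []gotgbot.LabeledPrice{{Label: "Total", Amount: 1099}}
    return b.CreateInvoiceLink("Example product", "Example description", "internal-payload",
        providerToken, "USD", prices, &gotgbot.CreateInvoiceLinkOpts{
            MaxTipAmount: 145, // max tip of US$1.45, per the field docs above
        })
}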
-// https://core.telegram.org/bots/api#createinvoicelink -func (bot *Bot) CreateInvoiceLink(title string, description string, payload string, providerToken string, currency string, prices []LabeledPrice, opts *CreateInvoiceLinkOpts) (string, error) { - v := map[string]string{} - v["title"] = title - v["description"] = description - v["payload"] = payload - v["provider_token"] = providerToken - v["currency"] = currency - if prices != nil { - bs, err := json.Marshal(prices) - if err != nil { - return "", fmt.Errorf("failed to marshal field prices: %w", err) - } - v["prices"] = string(bs) - } - if opts != nil { - if opts.MaxTipAmount != 0 { - v["max_tip_amount"] = strconv.FormatInt(opts.MaxTipAmount, 10) - } - if opts.SuggestedTipAmounts != nil { - bs, err := json.Marshal(opts.SuggestedTipAmounts) - if err != nil { - return "", fmt.Errorf("failed to marshal field suggested_tip_amounts: %w", err) - } - v["suggested_tip_amounts"] = string(bs) - } - v["provider_data"] = opts.ProviderData - v["photo_url"] = opts.PhotoUrl - if opts.PhotoSize != 0 { - v["photo_size"] = strconv.FormatInt(opts.PhotoSize, 10) - } - if opts.PhotoWidth != 0 { - v["photo_width"] = strconv.FormatInt(opts.PhotoWidth, 10) - } - if opts.PhotoHeight != 0 { - v["photo_height"] = strconv.FormatInt(opts.PhotoHeight, 10) - } - v["need_name"] = strconv.FormatBool(opts.NeedName) - v["need_phone_number"] = strconv.FormatBool(opts.NeedPhoneNumber) - v["need_email"] = strconv.FormatBool(opts.NeedEmail) - v["need_shipping_address"] = strconv.FormatBool(opts.NeedShippingAddress) - v["send_phone_number_to_provider"] = strconv.FormatBool(opts.SendPhoneNumberToProvider) - v["send_email_to_provider"] = strconv.FormatBool(opts.SendEmailToProvider) - v["is_flexible"] = strconv.FormatBool(opts.IsFlexible) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("createInvoiceLink", v, nil, reqOpts) - if err != nil { - return "", err - } - - var s string - return s, json.Unmarshal(r, &s) -} - -// CreateNewStickerSetOpts is the set of optional fields for Bot.CreateNewStickerSet. -type CreateNewStickerSetOpts struct { - // PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - PngSticker InputFile - // TGS animation with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#animated-sticker-requirements for technical requirements - TgsSticker InputFile - // WEBM video with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#video-sticker-requirements for technical requirements - WebmSticker InputFile - // Type of stickers in the set, pass "regular" or "mask". Custom emoji sticker sets can't be created via the Bot API at the moment. By default, a regular sticker set is created. - StickerType string - // A JSON-serialized object for position where the mask should be placed on faces - MaskPosition MaskPosition - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// CreateNewStickerSet Use this method to create a new sticker set owned by a user. 
The bot will be able to edit the sticker set thus created. You must use exactly one of the fields png_sticker, tgs_sticker, or webm_sticker. Returns True on success. -// - userId (type int64): User identifier of created sticker set owner -// - name (type string): Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals). Can contain only English letters, digits and underscores. Must begin with a letter, can't contain consecutive underscores and must end in "_by_<bot_username>". <bot_username> is case insensitive. 1-64 characters. -// - title (type string): Sticker set title, 1-64 characters -// - emojis (type string): One or more emoji corresponding to the sticker -// - opts (type CreateNewStickerSetOpts): All optional parameters. -// https://core.telegram.org/bots/api#createnewstickerset -func (bot *Bot) CreateNewStickerSet(userId int64, name string, title string, emojis string, opts *CreateNewStickerSetOpts) (bool, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["user_id"] = strconv.FormatInt(userId, 10) - v["name"] = name - v["title"] = title - v["emojis"] = emojis - if opts != nil { - if opts.PngSticker != nil { - switch m := opts.PngSticker.(type) { - case string: - v["png_sticker"] = m - - case NamedReader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = m - - case io.Reader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: m} - - case []byte: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.PngSticker) - } - } - if opts.TgsSticker != nil { - switch m := opts.TgsSticker.(type) { - case NamedReader: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = m - - case io.Reader: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = NamedFile{File: m} - - case []byte: - v["tgs_sticker"] = "attach://tgs_sticker" - data["tgs_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.TgsSticker) - } - } - if opts.WebmSticker != nil { - switch m := opts.WebmSticker.(type) { - case NamedReader: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = m - - case io.Reader: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = NamedFile{File: m} - - case []byte: - v["webm_sticker"] = "attach://webm_sticker" - data["webm_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.WebmSticker) - } - } - v["sticker_type"] = opts.StickerType - bs, err := json.Marshal(opts.MaskPosition) - if err != nil { - return false, fmt.Errorf("failed to marshal field mask_position: %w", err) - } - v["mask_position"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("createNewStickerSet", v, data, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeclineChatJoinRequestOpts is the set of optional fields for Bot.DeclineChatJoinRequest. -type DeclineChatJoinRequestOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeclineChatJoinRequest Use this method to decline a chat join request. The bot must be an administrator in the chat for this to work and must have the can_invite_users administrator right.
Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// https://core.telegram.org/bots/api#declinechatjoinrequest -func (bot *Bot) DeclineChatJoinRequest(chatId int64, userId int64, opts *DeclineChatJoinRequestOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("declineChatJoinRequest", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteChatPhotoOpts is the set of optional fields for Bot.DeleteChatPhoto. -type DeleteChatPhotoOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteChatPhoto Use this method to delete a chat photo. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// https://core.telegram.org/bots/api#deletechatphoto -func (bot *Bot) DeleteChatPhoto(chatId int64, opts *DeleteChatPhotoOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteChatPhoto", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteChatStickerSetOpts is the set of optional fields for Bot.DeleteChatStickerSet. -type DeleteChatStickerSetOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteChatStickerSet Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// https://core.telegram.org/bots/api#deletechatstickerset -func (bot *Bot) DeleteChatStickerSet(chatId int64, opts *DeleteChatStickerSetOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteChatStickerSet", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteForumTopicOpts is the set of optional fields for Bot.DeleteForumTopic. -type DeleteForumTopicOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteForumTopic Use this method to delete a forum topic along with all its messages in a forum supergroup chat. The bot must be an administrator in the chat for this to work and must have the can_delete_messages administrator rights. 
Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - messageThreadId (type int64): Unique identifier for the target message thread of the forum topic -// https://core.telegram.org/bots/api#deleteforumtopic -func (bot *Bot) DeleteForumTopic(chatId int64, messageThreadId int64, opts *DeleteForumTopicOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_thread_id"] = strconv.FormatInt(messageThreadId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteForumTopic", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteMessageOpts is the set of optional fields for Bot.DeleteMessage. -type DeleteMessageOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteMessage Use this method to delete a message, including service messages, with the following limitations: -// - A message can only be deleted if it was sent less than 48 hours ago. -// - Service messages about a supergroup, channel, or forum topic creation can't be deleted. -// - A dice message in a private chat can only be deleted if it was sent more than 24 hours ago. -// - Bots can delete outgoing messages in private chats, groups, and supergroups. -// - Bots can delete incoming messages in private chats. -// - Bots granted can_post_messages permissions can delete outgoing messages in channels. -// - If the bot is an administrator of a group, it can delete any message there. -// - If the bot has can_delete_messages permission in a supergroup or a channel, it can delete any message there. -// Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - messageId (type int64): Identifier of the message to delete -// https://core.telegram.org/bots/api#deletemessage -func (bot *Bot) DeleteMessage(chatId int64, messageId int64, opts *DeleteMessageOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_id"] = strconv.FormatInt(messageId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteMessage", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteMyCommandsOpts is the set of optional fields for Bot.DeleteMyCommands. -type DeleteMyCommandsOpts struct { - // A JSON-serialized object, describing scope of users for which the commands are relevant. Defaults to BotCommandScopeDefault. - Scope BotCommandScope - // A two-letter ISO 639-1 language code. If empty, commands will be applied to all users from the given scope, for whose language there are no dedicated commands - LanguageCode string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteMyCommands Use this method to delete the list of the bot's commands for the given scope and user language. After deletion, higher level commands will be shown to affected users. Returns True on success. -// - opts (type DeleteMyCommandsOpts): All optional parameters. 
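Given the DeleteMessage limitations listed above (the 48-hour window and the permission rules), a minimal sketch via the Message.Delete helper:

// Sketch only; assumes the gotgbot/v2 import above.
func tryDelete(b *gotgbot.Bot, m gotgbot.Message) error {
    // Message.Delete wraps Bot.DeleteMessage; Telegram returns an error for
    // messages that fall outside the limitations documented above.
    _, err := m.Delete(b, nil)
    return err
}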
-// https://core.telegram.org/bots/api#deletemycommands -func (bot *Bot) DeleteMyCommands(opts *DeleteMyCommandsOpts) (bool, error) { - v := map[string]string{} - if opts != nil { - bs, err := json.Marshal(opts.Scope) - if err != nil { - return false, fmt.Errorf("failed to marshal field scope: %w", err) - } - v["scope"] = string(bs) - v["language_code"] = opts.LanguageCode - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteMyCommands", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteStickerFromSetOpts is the set of optional fields for Bot.DeleteStickerFromSet. -type DeleteStickerFromSetOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteStickerFromSet Use this method to delete a sticker from a set created by the bot. Returns True on success. -// - sticker (type string): File identifier of the sticker -// https://core.telegram.org/bots/api#deletestickerfromset -func (bot *Bot) DeleteStickerFromSet(sticker string, opts *DeleteStickerFromSetOpts) (bool, error) { - v := map[string]string{} - v["sticker"] = sticker - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteStickerFromSet", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// DeleteWebhookOpts is the set of optional fields for Bot.DeleteWebhook. -type DeleteWebhookOpts struct { - // Pass True to drop all pending updates - DropPendingUpdates bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// DeleteWebhook Use this method to remove webhook integration if you decide to switch back to getUpdates. Returns True on success. -// - opts (type DeleteWebhookOpts): All optional parameters. -// https://core.telegram.org/bots/api#deletewebhook -func (bot *Bot) DeleteWebhook(opts *DeleteWebhookOpts) (bool, error) { - v := map[string]string{} - if opts != nil { - v["drop_pending_updates"] = strconv.FormatBool(opts.DropPendingUpdates) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("deleteWebhook", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// EditChatInviteLinkOpts is the set of optional fields for Bot.EditChatInviteLink. -type EditChatInviteLinkOpts struct { - // Invite link name; 0-32 characters - Name string - // Point in time (Unix timestamp) when the link will expire - ExpireDate int64 - // The maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999 - MemberLimit int64 - // True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified - CreatesJoinRequest bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditChatInviteLink Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object. 
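A sketch of switching back to getUpdates with DeleteWebhook as described above, dropping any updates queued while the webhook was active:

// Sketch only; assumes the gotgbot/v2 import above.
func backToPolling(b *gotgbot.Bot) error {
    _, err := b.DeleteWebhook(&gotgbot.DeleteWebhookOpts{
        DropPendingUpdates: true, // discard updates accumulated under the webhook
    })
    return err
}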
-// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - inviteLink (type string): The invite link to edit -// - opts (type EditChatInviteLinkOpts): All optional parameters. -// https://core.telegram.org/bots/api#editchatinvitelink -func (bot *Bot) EditChatInviteLink(chatId int64, inviteLink string, opts *EditChatInviteLinkOpts) (*ChatInviteLink, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["invite_link"] = inviteLink - if opts != nil { - v["name"] = opts.Name - if opts.ExpireDate != 0 { - v["expire_date"] = strconv.FormatInt(opts.ExpireDate, 10) - } - if opts.MemberLimit != 0 { - v["member_limit"] = strconv.FormatInt(opts.MemberLimit, 10) - } - v["creates_join_request"] = strconv.FormatBool(opts.CreatesJoinRequest) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editChatInviteLink", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var c ChatInviteLink - return &c, json.Unmarshal(r, &c) -} - -// EditForumTopicOpts is the set of optional fields for Bot.EditForumTopic. -type EditForumTopicOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditForumTopic Use this method to edit name and icon of a topic in a forum supergroup chat. The bot must be an administrator in the chat for this to work and must have can_manage_topics administrator rights, unless it is the creator of the topic. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - messageThreadId (type int64): Unique identifier for the target message thread of the forum topic -// - name (type string): New topic name, 1-128 characters -// - iconCustomEmojiId (type string): New unique identifier of the custom emoji shown as the topic icon. Use getForumTopicIconStickers to get all allowed custom emoji identifiers. -// https://core.telegram.org/bots/api#editforumtopic -func (bot *Bot) EditForumTopic(chatId int64, messageThreadId int64, name string, iconCustomEmojiId string, opts *EditForumTopicOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_thread_id"] = strconv.FormatInt(messageThreadId, 10) - v["name"] = name - v["icon_custom_emoji_id"] = iconCustomEmojiId - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editForumTopic", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// EditMessageCaptionOpts is the set of optional fields for Bot.EditMessageCaption. -type EditMessageCaptionOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message to edit - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // New caption of the message, 0-1024 characters after entities parsing - Caption string - // Mode for parsing entities in the message caption. See formatting options for more details. 
- ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // A JSON-serialized object for an inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditMessageCaption Use this method to edit captions of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. -// - opts (type EditMessageCaptionOpts): All optional parameters. -// https://core.telegram.org/bots/api#editmessagecaption -func (bot *Bot) EditMessageCaption(opts *EditMessageCaptionOpts) (*Message, bool, error) { - v := map[string]string{} - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editMessageCaption", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// EditMessageLiveLocationOpts is the set of optional fields for Bot.EditMessageLiveLocation. -type EditMessageLiveLocationOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message to edit - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // The radius of uncertainty for the location, measured in meters; 0-1500 - HorizontalAccuracy float64 - // Direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. - Heading int64 - // The maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. - ProximityAlertRadius int64 - // A JSON-serialized object for a new inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditMessageLiveLocation Use this method to edit live location messages. A location can be edited until its live_period expires or editing is explicitly disabled by a call to stopMessageLiveLocation. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. 
-// - latitude (type float64): Latitude of new location -// - longitude (type float64): Longitude of new location -// - opts (type EditMessageLiveLocationOpts): All optional parameters. -// https://core.telegram.org/bots/api#editmessagelivelocation -func (bot *Bot) EditMessageLiveLocation(latitude float64, longitude float64, opts *EditMessageLiveLocationOpts) (*Message, bool, error) { - v := map[string]string{} - v["latitude"] = strconv.FormatFloat(latitude, 'f', -1, 64) - v["longitude"] = strconv.FormatFloat(longitude, 'f', -1, 64) - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - if opts.HorizontalAccuracy != 0.0 { - v["horizontal_accuracy"] = strconv.FormatFloat(opts.HorizontalAccuracy, 'f', -1, 64) - } - if opts.Heading != 0 { - v["heading"] = strconv.FormatInt(opts.Heading, 10) - } - if opts.ProximityAlertRadius != 0 { - v["proximity_alert_radius"] = strconv.FormatInt(opts.ProximityAlertRadius, 10) - } - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editMessageLiveLocation", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// EditMessageMediaOpts is the set of optional fields for Bot.EditMessageMedia. -type EditMessageMediaOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message to edit - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // A JSON-serialized object for a new inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditMessageMedia Use this method to edit animation, audio, document, photo, or video messages. If a message is part of a message album, then it can be edited only to an audio for audio albums, only to a document for document albums and to a photo or a video otherwise. When an inline message is edited, a new file can't be uploaded; use a previously uploaded file via its file_id or specify a URL. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. -// - media (type InputMedia): A JSON-serialized object for a new media content of the message -// - opts (type EditMessageMediaOpts): All optional parameters. 
-// https://core.telegram.org/bots/api#editmessagemedia -func (bot *Bot) EditMessageMedia(media InputMedia, opts *EditMessageMediaOpts) (*Message, bool, error) { - v := map[string]string{} - data := map[string]NamedReader{} - inputMediaBs, err := media.InputMediaParams("media", data) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field media: %w", err) - } - v["media"] = string(inputMediaBs) - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editMessageMedia", v, data, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// EditMessageReplyMarkupOpts is the set of optional fields for Bot.EditMessageReplyMarkup. -type EditMessageReplyMarkupOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message to edit - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // A JSON-serialized object for an inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditMessageReplyMarkup Use this method to edit only the reply markup of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. -// - opts (type EditMessageReplyMarkupOpts): All optional parameters. -// https://core.telegram.org/bots/api#editmessagereplymarkup -func (bot *Bot) EditMessageReplyMarkup(opts *EditMessageReplyMarkupOpts) (*Message, bool, error) { - v := map[string]string{} - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editMessageReplyMarkup", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// EditMessageTextOpts is the set of optional fields for Bot.EditMessageText. -type EditMessageTextOpts struct { - // Required if inline_message_id is not specified. 
Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message to edit - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // Mode for parsing entities in the message text. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode - Entities []MessageEntity - // Disables link previews for links in this message - DisableWebPagePreview bool - // A JSON-serialized object for an inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// EditMessageText Use this method to edit text and game messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. -// - text (type string): New text of the message, 1-4096 characters after entities parsing -// - opts (type EditMessageTextOpts): All optional parameters. -// https://core.telegram.org/bots/api#editmessagetext -func (bot *Bot) EditMessageText(text string, opts *EditMessageTextOpts) (*Message, bool, error) { - v := map[string]string{} - v["text"] = text - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - v["parse_mode"] = opts.ParseMode - if opts.Entities != nil { - bs, err := json.Marshal(opts.Entities) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field entities: %w", err) - } - v["entities"] = string(bs) - } - v["disable_web_page_preview"] = strconv.FormatBool(opts.DisableWebPagePreview) - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("editMessageText", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// ExportChatInviteLinkOpts is the set of optional fields for Bot.ExportChatInviteLink. -type ExportChatInviteLinkOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// ExportChatInviteLink Use this method to generate a new primary invite link for a chat; any previously generated primary link is revoked. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the new invite link as String on success. 
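The editMessage* wrappers above return (*Message, bool, error) because the Bot API answers with either the edited Message or a bare True for inline messages. A sketch of telling the two apart, assuming an initialized bot and the standard library's log package:

    // editText edits an ordinary chat message. For inline-message edits the
    // returned *Message is nil and only the boolean carries the API's result.
    func editText(b *gotgbot.Bot, chatID, messageID int64) error {
        msg, _, err := b.EditMessageText("updated text", &gotgbot.EditMessageTextOpts{
            ChatId:    chatID,
            MessageId: messageID,
        })
        if err != nil {
            return err
        }
        if msg != nil {
            log.Printf("edited message %d", msg.MessageId)
        }
        return nil
    }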
-// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// https://core.telegram.org/bots/api#exportchatinvitelink -func (bot *Bot) ExportChatInviteLink(chatId int64, opts *ExportChatInviteLinkOpts) (string, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("exportChatInviteLink", v, nil, reqOpts) - if err != nil { - return "", err - } - - var s string - return s, json.Unmarshal(r, &s) -} - -// ForwardMessageOpts is the set of optional fields for Bot.ForwardMessage. -type ForwardMessageOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the forwarded message from forwarding and saving - ProtectContent bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// ForwardMessage Use this method to forward messages of any kind. Service messages can't be forwarded. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - fromChatId (type int64): Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername) -// - messageId (type int64): Message identifier in the chat specified in from_chat_id -// - opts (type ForwardMessageOpts): All optional parameters. -// https://core.telegram.org/bots/api#forwardmessage -func (bot *Bot) ForwardMessage(chatId int64, fromChatId int64, messageId int64, opts *ForwardMessageOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["from_chat_id"] = strconv.FormatInt(fromChatId, 10) - v["message_id"] = strconv.FormatInt(messageId, 10) - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("forwardMessage", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// GetChatOpts is the set of optional fields for Bot.GetChat. -type GetChatOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetChat Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Returns a Chat object on success. 
-// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) -// https://core.telegram.org/bots/api#getchat -func (bot *Bot) GetChat(chatId int64, opts *GetChatOpts) (*Chat, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getChat", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var c Chat - return &c, json.Unmarshal(r, &c) -} - -// GetChatAdministratorsOpts is the set of optional fields for Bot.GetChatAdministrators. -type GetChatAdministratorsOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetChatAdministrators Use this method to get a list of administrators in a chat, which aren't bots. Returns an Array of ChatMember objects. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) -// https://core.telegram.org/bots/api#getchatadministrators -func (bot *Bot) GetChatAdministrators(chatId int64, opts *GetChatAdministratorsOpts) ([]ChatMember, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getChatAdministrators", v, nil, reqOpts) - if err != nil { - return nil, err - } - - return unmarshalChatMemberArray(r) -} - -// GetChatMemberOpts is the set of optional fields for Bot.GetChatMember. -type GetChatMemberOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetChatMember Use this method to get information about a member of a chat. Returns a ChatMember object on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// https://core.telegram.org/bots/api#getchatmember -func (bot *Bot) GetChatMember(chatId int64, userId int64, opts *GetChatMemberOpts) (ChatMember, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getChatMember", v, nil, reqOpts) - if err != nil { - return nil, err - } - - return unmarshalChatMember(r) -} - -// GetChatMemberCountOpts is the set of optional fields for Bot.GetChatMemberCount. -type GetChatMemberCountOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetChatMemberCount Use this method to get the number of members in a chat. Returns Int on success. 
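A short sketch combining the chat-introspection wrappers above; only functions shown in this hunk are used, and the standard library's log package is assumed:

    // listAdmins reports how many of a chat's members are administrators.
    func listAdmins(b *gotgbot.Bot, chatID int64) error {
        admins, err := b.GetChatAdministrators(chatID, nil)
        if err != nil {
            return err
        }
        count, err := b.GetChatMemberCount(chatID, nil)
        if err != nil {
            return err
        }
        log.Printf("%d administrators out of %d members", len(admins), count)
        return nil
    }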
-// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) -// https://core.telegram.org/bots/api#getchatmembercount -func (bot *Bot) GetChatMemberCount(chatId int64, opts *GetChatMemberCountOpts) (int64, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getChatMemberCount", v, nil, reqOpts) - if err != nil { - return 0, err - } - - var i int64 - return i, json.Unmarshal(r, &i) -} - -// GetChatMenuButtonOpts is the set of optional fields for Bot.GetChatMenuButton. -type GetChatMenuButtonOpts struct { - // Unique identifier for the target private chat. If not specified, default bot's menu button will be returned - ChatId int64 - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetChatMenuButton Use this method to get the current value of the bot's menu button in a private chat, or the default menu button. Returns MenuButton on success. -// - opts (type GetChatMenuButtonOpts): All optional parameters. -// https://core.telegram.org/bots/api#getchatmenubutton -func (bot *Bot) GetChatMenuButton(opts *GetChatMenuButtonOpts) (MenuButton, error) { - v := map[string]string{} - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getChatMenuButton", v, nil, reqOpts) - if err != nil { - return nil, err - } - - return unmarshalMenuButton(r) -} - -// GetCustomEmojiStickersOpts is the set of optional fields for Bot.GetCustomEmojiStickers. -type GetCustomEmojiStickersOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetCustomEmojiStickers Use this method to get information about custom emoji stickers by their identifiers. Returns an Array of Sticker objects. -// - customEmojiIds (type []string): List of custom emoji identifiers. At most 200 custom emoji identifiers can be specified. -// https://core.telegram.org/bots/api#getcustomemojistickers -func (bot *Bot) GetCustomEmojiStickers(customEmojiIds []string, opts *GetCustomEmojiStickersOpts) ([]Sticker, error) { - v := map[string]string{} - if customEmojiIds != nil { - bs, err := json.Marshal(customEmojiIds) - if err != nil { - return nil, fmt.Errorf("failed to marshal field custom_emoji_ids: %w", err) - } - v["custom_emoji_ids"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getCustomEmojiStickers", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var s []Sticker - return s, json.Unmarshal(r, &s) -} - -// GetFileOpts is the set of optional fields for Bot.GetFile. -type GetFileOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetFile Use this method to get basic information about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. On success, a File object is returned. The file can then be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is taken from the response. It is guaranteed that the link will be valid for at least 1 hour. 
When the link expires, a new one can be requested by calling getFile again. -// Note: This function may not preserve the original file name and MIME type. You should save the file's MIME type and name (if available) when the File object is received. -// - fileId (type string): File identifier to get information about -// https://core.telegram.org/bots/api#getfile -func (bot *Bot) GetFile(fileId string, opts *GetFileOpts) (*File, error) { - v := map[string]string{} - v["file_id"] = fileId - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getFile", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var f File - return &f, json.Unmarshal(r, &f) -} - -// GetForumTopicIconStickersOpts is the set of optional fields for Bot.GetForumTopicIconStickers. -type GetForumTopicIconStickersOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetForumTopicIconStickers Use this method to get custom emoji stickers, which can be used as a forum topic icon by any user. Requires no parameters. Returns an Array of Sticker objects. -// https://core.telegram.org/bots/api#getforumtopiciconstickers -func (bot *Bot) GetForumTopicIconStickers(opts *GetForumTopicIconStickersOpts) ([]Sticker, error) { - v := map[string]string{} - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getForumTopicIconStickers", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var s []Sticker - return s, json.Unmarshal(r, &s) -} - -// GetGameHighScoresOpts is the set of optional fields for Bot.GetGameHighScores. -type GetGameHighScoresOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the sent message - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetGameHighScores Use this method to get data for high score tables. Will return the score of the specified user and several of their neighbors in a game. Returns an Array of GameHighScore objects. -// - userId (type int64): Target user id -// - opts (type GetGameHighScoresOpts): All optional parameters. -// https://core.telegram.org/bots/api#getgamehighscores -func (bot *Bot) GetGameHighScores(userId int64, opts *GetGameHighScoresOpts) ([]GameHighScore, error) { - v := map[string]string{} - v["user_id"] = strconv.FormatInt(userId, 10) - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getGameHighScores", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var g []GameHighScore - return g, json.Unmarshal(r, &g) -} - -// GetMeOpts is the set of optional fields for Bot.GetMe. -type GetMeOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetMe A simple method for testing your bot's authentication token. Requires no parameters. 
Returns basic information about the bot in form of a User object. -// https://core.telegram.org/bots/api#getme -func (bot *Bot) GetMe(opts *GetMeOpts) (*User, error) { - v := map[string]string{} - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getMe", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var u User - return &u, json.Unmarshal(r, &u) -} - -// GetMyCommandsOpts is the set of optional fields for Bot.GetMyCommands. -type GetMyCommandsOpts struct { - // A JSON-serialized object, describing scope of users. Defaults to BotCommandScopeDefault. - Scope BotCommandScope - // A two-letter ISO 639-1 language code or an empty string - LanguageCode string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetMyCommands Use this method to get the current list of the bot's commands for the given scope and user language. Returns an Array of BotCommand objects. If commands aren't set, an empty list is returned. -// - opts (type GetMyCommandsOpts): All optional parameters. -// https://core.telegram.org/bots/api#getmycommands -func (bot *Bot) GetMyCommands(opts *GetMyCommandsOpts) ([]BotCommand, error) { - v := map[string]string{} - if opts != nil { - bs, err := json.Marshal(opts.Scope) - if err != nil { - return nil, fmt.Errorf("failed to marshal field scope: %w", err) - } - v["scope"] = string(bs) - v["language_code"] = opts.LanguageCode - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getMyCommands", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var b []BotCommand - return b, json.Unmarshal(r, &b) -} - -// GetMyDefaultAdministratorRightsOpts is the set of optional fields for Bot.GetMyDefaultAdministratorRights. -type GetMyDefaultAdministratorRightsOpts struct { - // Pass True to get default administrator rights of the bot in channels. Otherwise, default administrator rights of the bot for groups and supergroups will be returned. - ForChannels bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetMyDefaultAdministratorRights Use this method to get the current default administrator rights of the bot. Returns ChatAdministratorRights on success. -// - opts (type GetMyDefaultAdministratorRightsOpts): All optional parameters. -// https://core.telegram.org/bots/api#getmydefaultadministratorrights -func (bot *Bot) GetMyDefaultAdministratorRights(opts *GetMyDefaultAdministratorRightsOpts) (*ChatAdministratorRights, error) { - v := map[string]string{} - if opts != nil { - v["for_channels"] = strconv.FormatBool(opts.ForChannels) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getMyDefaultAdministratorRights", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var c ChatAdministratorRights - return &c, json.Unmarshal(r, &c) -} - -// GetStickerSetOpts is the set of optional fields for Bot.GetStickerSet. -type GetStickerSetOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetStickerSet Use this method to get a sticker set. On success, a StickerSet object is returned. 
-// - name (type string): Name of the sticker set -// https://core.telegram.org/bots/api#getstickerset -func (bot *Bot) GetStickerSet(name string, opts *GetStickerSetOpts) (*StickerSet, error) { - v := map[string]string{} - v["name"] = name - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getStickerSet", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var s StickerSet - return &s, json.Unmarshal(r, &s) -} - -// GetUpdatesOpts is the set of optional fields for Bot.GetUpdates. -type GetUpdatesOpts struct { - // Identifier of the first update to be returned. Must be greater by one than the highest among the identifiers of previously received updates. By default, updates starting with the earliest unconfirmed update are returned. An update is considered confirmed as soon as getUpdates is called with an offset higher than its update_id. The negative offset can be specified to retrieve updates starting from -offset update from the end of the updates queue. All previous updates will be forgotten. - Offset int64 - // Limits the number of updates to be retrieved. Values between 1-100 are accepted. Defaults to 100. - Limit int64 - // Timeout in seconds for long polling. Defaults to 0, i.e. usual short polling. Should be positive, short polling should be used for testing purposes only. - Timeout int64 - // A JSON-serialized list of the update types you want your bot to receive. For example, specify ["message", "edited_channel_post", "callback_query"] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all update types except chat_member (default). If not specified, the previous setting will be used. Please note that this parameter doesn't affect updates created before the call to the getUpdates, so unwanted updates may be received for a short period of time. - AllowedUpdates []string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetUpdates Use this method to receive incoming updates using long polling (wiki). Returns an Array of Update objects. -// - opts (type GetUpdatesOpts): All optional parameters. -// https://core.telegram.org/bots/api#getupdates -func (bot *Bot) GetUpdates(opts *GetUpdatesOpts) ([]Update, error) { - v := map[string]string{} - if opts != nil { - if opts.Offset != 0 { - v["offset"] = strconv.FormatInt(opts.Offset, 10) - } - if opts.Limit != 0 { - v["limit"] = strconv.FormatInt(opts.Limit, 10) - } - if opts.Timeout != 0 { - v["timeout"] = strconv.FormatInt(opts.Timeout, 10) - } - if opts.AllowedUpdates != nil { - bs, err := json.Marshal(opts.AllowedUpdates) - if err != nil { - return nil, fmt.Errorf("failed to marshal field allowed_updates: %w", err) - } - v["allowed_updates"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getUpdates", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var u []Update - return u, json.Unmarshal(r, &u) -} - -// GetUserProfilePhotosOpts is the set of optional fields for Bot.GetUserProfilePhotos. -type GetUserProfilePhotosOpts struct { - // Sequential number of the first photo to be returned. By default, all photos are returned. - Offset int64 - // Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100. 
- Limit int64 - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetUserProfilePhotos Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object. -// - userId (type int64): Unique identifier of the target user -// - opts (type GetUserProfilePhotosOpts): All optional parameters. -// https://core.telegram.org/bots/api#getuserprofilephotos -func (bot *Bot) GetUserProfilePhotos(userId int64, opts *GetUserProfilePhotosOpts) (*UserProfilePhotos, error) { - v := map[string]string{} - v["user_id"] = strconv.FormatInt(userId, 10) - if opts != nil { - if opts.Offset != 0 { - v["offset"] = strconv.FormatInt(opts.Offset, 10) - } - if opts.Limit != 0 { - v["limit"] = strconv.FormatInt(opts.Limit, 10) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getUserProfilePhotos", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var u UserProfilePhotos - return &u, json.Unmarshal(r, &u) -} - -// GetWebhookInfoOpts is the set of optional fields for Bot.GetWebhookInfo. -type GetWebhookInfoOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// GetWebhookInfo Use this method to get current webhook status. Requires no parameters. On success, returns a WebhookInfo object. If the bot is using getUpdates, will return an object with the url field empty. -// https://core.telegram.org/bots/api#getwebhookinfo -func (bot *Bot) GetWebhookInfo(opts *GetWebhookInfoOpts) (*WebhookInfo, error) { - v := map[string]string{} - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("getWebhookInfo", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var w WebhookInfo - return &w, json.Unmarshal(r, &w) -} - -// LeaveChatOpts is the set of optional fields for Bot.LeaveChat. -type LeaveChatOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// LeaveChat Use this method for your bot to leave a group, supergroup or channel. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) -// https://core.telegram.org/bots/api#leavechat -func (bot *Bot) LeaveChat(chatId int64, opts *LeaveChatOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("leaveChat", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// LogOutOpts is the set of optional fields for Bot.LogOut. -type LogOutOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// LogOut Use this method to log out from the cloud Bot API server before launching the bot locally. You must log out the bot before running it locally, otherwise there is no guarantee that the bot will receive updates. After a successful call, you can immediately log in on a local server, but will not be able to log in back to the cloud Bot API server for 10 minutes. Returns True on success. Requires no parameters. 
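A long-polling sketch built on the GetUpdates wrapper above. The per-request HTTP timeout (RequestOpts.Timeout, a time.Duration in this package's request options) should exceed the server-side Timeout so the client does not cut the poll short; the loop body is illustrative:

    func pollLoop(b *gotgbot.Bot) error {
        var offset int64
        for {
            updates, err := b.GetUpdates(&gotgbot.GetUpdatesOpts{
                Offset:  offset, // confirms all updates below this update_id
                Timeout: 30,     // seconds the server may hold the request open
                RequestOpts: &gotgbot.RequestOpts{
                    Timeout: 35 * time.Second, // HTTP timeout > poll timeout
                },
            })
            if err != nil {
                return err
            }
            for _, u := range updates {
                offset = u.UpdateId + 1 // acknowledge, per the Offset docs above
                // ... dispatch u to handlers here ...
            }
        }
    }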
-// https://core.telegram.org/bots/api#logout -func (bot *Bot) LogOut(opts *LogOutOpts) (bool, error) { - v := map[string]string{} - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("logOut", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// PinChatMessageOpts is the set of optional fields for Bot.PinChatMessage. -type PinChatMessageOpts struct { - // Pass True if it is not necessary to send a notification to all chat members about the new pinned message. Notifications are always disabled in channels and private chats. - DisableNotification bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// PinChatMessage Use this method to add a message to the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - messageId (type int64): Identifier of a message to pin -// - opts (type PinChatMessageOpts): All optional parameters. -// https://core.telegram.org/bots/api#pinchatmessage -func (bot *Bot) PinChatMessage(chatId int64, messageId int64, opts *PinChatMessageOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_id"] = strconv.FormatInt(messageId, 10) - if opts != nil { - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("pinChatMessage", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// PromoteChatMemberOpts is the set of optional fields for Bot.PromoteChatMember. -type PromoteChatMemberOpts struct { - // Pass True if the administrator's presence in the chat is hidden - IsAnonymous bool - // Pass True if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. 
Implied by any other administrator privilege - CanManageChat bool - // Pass True if the administrator can create channel posts, channels only - CanPostMessages bool - // Pass True if the administrator can edit messages of other users and can pin messages, channels only - CanEditMessages bool - // Pass True if the administrator can delete messages of other users - CanDeleteMessages bool - // Pass True if the administrator can manage video chats - CanManageVideoChats bool - // Pass True if the administrator can restrict, ban or unban chat members - CanRestrictMembers bool - // Pass True if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him) - CanPromoteMembers bool - // Pass True if the administrator can change chat title, photo and other settings - CanChangeInfo bool - // Pass True if the administrator can invite new users to the chat - CanInviteUsers bool - // Pass True if the administrator can pin messages, supergroups only - CanPinMessages bool - // Pass True if the user is allowed to create, rename, close, and reopen forum topics, supergroups only - CanManageTopics bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// PromoteChatMember Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Pass False for all boolean parameters to demote a user. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// - opts (type PromoteChatMemberOpts): All optional parameters. -// https://core.telegram.org/bots/api#promotechatmember -func (bot *Bot) PromoteChatMember(chatId int64, userId int64, opts *PromoteChatMemberOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - if opts != nil { - v["is_anonymous"] = strconv.FormatBool(opts.IsAnonymous) - v["can_manage_chat"] = strconv.FormatBool(opts.CanManageChat) - v["can_post_messages"] = strconv.FormatBool(opts.CanPostMessages) - v["can_edit_messages"] = strconv.FormatBool(opts.CanEditMessages) - v["can_delete_messages"] = strconv.FormatBool(opts.CanDeleteMessages) - v["can_manage_video_chats"] = strconv.FormatBool(opts.CanManageVideoChats) - v["can_restrict_members"] = strconv.FormatBool(opts.CanRestrictMembers) - v["can_promote_members"] = strconv.FormatBool(opts.CanPromoteMembers) - v["can_change_info"] = strconv.FormatBool(opts.CanChangeInfo) - v["can_invite_users"] = strconv.FormatBool(opts.CanInviteUsers) - v["can_pin_messages"] = strconv.FormatBool(opts.CanPinMessages) - v["can_manage_topics"] = strconv.FormatBool(opts.CanManageTopics) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("promoteChatMember", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// ReopenForumTopicOpts is the set of optional fields for Bot.ReopenForumTopic. 
-type ReopenForumTopicOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// ReopenForumTopic Use this method to reopen a closed topic in a forum supergroup chat. The bot must be an administrator in the chat for this to work and must have the can_manage_topics administrator rights, unless it is the creator of the topic. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - messageThreadId (type int64): Unique identifier for the target message thread of the forum topic -// https://core.telegram.org/bots/api#reopenforumtopic -func (bot *Bot) ReopenForumTopic(chatId int64, messageThreadId int64, opts *ReopenForumTopicOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_thread_id"] = strconv.FormatInt(messageThreadId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("reopenForumTopic", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// RestrictChatMemberOpts is the set of optional fields for Bot.RestrictChatMember. -type RestrictChatMemberOpts struct { - // Date when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever - UntilDate int64 - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// RestrictChatMember Use this method to restrict a user in a supergroup. The bot must be an administrator in the supergroup for this to work and must have the appropriate administrator rights. Pass True for all permissions to lift restrictions from a user. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - userId (type int64): Unique identifier of the target user -// - permissions (type ChatPermissions): A JSON-serialized object for new user permissions -// - opts (type RestrictChatMemberOpts): All optional parameters. -// https://core.telegram.org/bots/api#restrictchatmember -func (bot *Bot) RestrictChatMember(chatId int64, userId int64, permissions ChatPermissions, opts *RestrictChatMemberOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - bs, err := json.Marshal(permissions) - if err != nil { - return false, fmt.Errorf("failed to marshal field permissions: %w", err) - } - v["permissions"] = string(bs) - if opts != nil { - if opts.UntilDate != 0 { - v["until_date"] = strconv.FormatInt(opts.UntilDate, 10) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("restrictChatMember", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// RevokeChatInviteLinkOpts is the set of optional fields for Bot.RevokeChatInviteLink. -type RevokeChatInviteLinkOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// RevokeChatInviteLink Use this method to revoke an invite link created by the bot. 
If the primary link is revoked, a new link is automatically generated. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the revoked invite link as ChatInviteLink object. -// - chatId (type int64): Unique identifier of the target chat or username of the target channel (in the format @channelusername) -// - inviteLink (type string): The invite link to revoke -// https://core.telegram.org/bots/api#revokechatinvitelink -func (bot *Bot) RevokeChatInviteLink(chatId int64, inviteLink string, opts *RevokeChatInviteLinkOpts) (*ChatInviteLink, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["invite_link"] = inviteLink - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("revokeChatInviteLink", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var c ChatInviteLink - return &c, json.Unmarshal(r, &c) -} - -// SendAnimationOpts is the set of optional fields for Bot.SendAnimation. -type SendAnimationOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Duration of sent animation in seconds - Duration int64 - // Animation width - Width int64 - // Animation height - Height int64 - // Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb InputFile - // Animation caption (may also be used when resending animation by file_id), 0-1024 characters after entities parsing - Caption string - // Mode for parsing entities in the animation caption. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendAnimation Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). On success, the sent Message is returned. Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - animation (type InputFile): Animation to send. 
Pass a file_id as String to send an animation that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation from the Internet, or upload a new animation using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendAnimationOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendanimation -func (bot *Bot) SendAnimation(chatId int64, animation InputFile, opts *SendAnimationOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if animation != nil { - switch m := animation.(type) { - case string: - v["animation"] = m - - case NamedReader: - v["animation"] = "attach://animation" - data["animation"] = m - - case io.Reader: - v["animation"] = "attach://animation" - data["animation"] = NamedFile{File: m} - - case []byte: - v["animation"] = "attach://animation" - data["animation"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", animation) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - if opts.Duration != 0 { - v["duration"] = strconv.FormatInt(opts.Duration, 10) - } - if opts.Width != 0 { - v["width"] = strconv.FormatInt(opts.Width, 10) - } - if opts.Height != 0 { - v["height"] = strconv.FormatInt(opts.Height, 10) - } - if opts.Thumb != nil { - switch m := opts.Thumb.(type) { - case string: - v["thumb"] = m - - case NamedReader: - v["thumb"] = "attach://thumb" - data["thumb"] = m - - case io.Reader: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: m} - - case []byte: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb) - } - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendAnimation", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendAudioOpts is the set of optional fields for Bot.SendAudio. -type SendAudioOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Audio caption, 0-1024 characters after entities parsing - Caption string - // Mode for parsing entities in the audio caption. See formatting options for more details. 
- ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Duration of the audio in seconds - Duration int64 - // Performer - Performer string - // Track name - Title string - // Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb InputFile - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendAudio Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .MP3 or .M4A format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. -// For sending voice messages, use the sendVoice method instead. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - audio (type InputFile): Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendAudioOpts): All optional parameters. 
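A sketch of the two most common InputFile shapes accepted by the type switch inside SendAudio below: a plain string (file_id or HTTP URL) versus an uploaded reader. The os package and the file name are assumptions for illustration:

    func sendTrack(b *gotgbot.Bot, chatID int64) error {
        // A string InputFile is passed through as a file_id or URL.
        if _, err := b.SendAudio(chatID, "https://example.com/track.mp3", nil); err != nil {
            return err
        }
        // A reader is uploaded via multipart/form-data; *os.File also exposes
        // Name(), so it matches the NamedReader branch of the switch.
        f, err := os.Open("track.mp3")
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = b.SendAudio(chatID, f, &gotgbot.SendAudioOpts{Title: "Demo"})
        return err
    }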
-// https://core.telegram.org/bots/api#sendaudio -func (bot *Bot) SendAudio(chatId int64, audio InputFile, opts *SendAudioOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if audio != nil { - switch m := audio.(type) { - case string: - v["audio"] = m - - case NamedReader: - v["audio"] = "attach://audio" - data["audio"] = m - - case io.Reader: - v["audio"] = "attach://audio" - data["audio"] = NamedFile{File: m} - - case []byte: - v["audio"] = "attach://audio" - data["audio"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", audio) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - if opts.Duration != 0 { - v["duration"] = strconv.FormatInt(opts.Duration, 10) - } - v["performer"] = opts.Performer - v["title"] = opts.Title - if opts.Thumb != nil { - switch m := opts.Thumb.(type) { - case string: - v["thumb"] = m - - case NamedReader: - v["thumb"] = "attach://thumb" - data["thumb"] = m - - case io.Reader: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: m} - - case []byte: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb) - } - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendAudio", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendChatActionOpts is the set of optional fields for Bot.SendChatAction. -type SendChatActionOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendChatAction Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). Returns True on success. -// We only recommend using this method when a response from the bot will take a noticeable amount of time to arrive. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - action (type string): Type of action to broadcast. 
Choose one, depending on what the user is about to receive: typing for text messages, upload_photo for photos, record_video or upload_video for videos, record_voice or upload_voice for voice notes, upload_document for general files, choose_sticker for stickers, find_location for location data, record_video_note or upload_video_note for video notes. -// https://core.telegram.org/bots/api#sendchataction -func (bot *Bot) SendChatAction(chatId int64, action string, opts *SendChatActionOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["action"] = action - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendChatAction", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SendContactOpts is the set of optional fields for Bot.SendContact. -type SendContactOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Contact's last name - LastName string - // Additional data about the contact in the form of a vCard, 0-2048 bytes - Vcard string - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendContact Use this method to send phone contacts. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - phoneNumber (type string): Contact's phone number -// - firstName (type string): Contact's first name -// - opts (type SendContactOpts): All optional parameters. 
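
A hedged sketch for the SendChatAction implementation above, assuming a *gotgbot.Bot named b; the slow work is passed in as a callback so the example stays self-contained:

func chatActionSketch(b *gotgbot.Bot, chatID int64, slowReply func() string) error {
	// Show a "typing" indicator while the bot prepares its answer; per the docs
	// above, the status clears after ~5 seconds or when the bot's message arrives.
	if _, err := b.SendChatAction(chatID, "typing", nil); err != nil {
		return err
	}
	_, err := b.SendMessage(chatID, slowReply(), nil)
	return err
}
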
-// https://core.telegram.org/bots/api#sendcontact -func (bot *Bot) SendContact(chatId int64, phoneNumber string, firstName string, opts *SendContactOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["phone_number"] = phoneNumber - v["first_name"] = firstName - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["last_name"] = opts.LastName - v["vcard"] = opts.Vcard - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendContact", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendDiceOpts is the set of optional fields for Bot.SendDice. -type SendDiceOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Emoji on which the dice throw animation is based. Currently, must be one of "🎲", "🎯", "🏀", "⚽", "🎳", or "🎰". Dice can have values 1-6 for "🎲", "🎯" and "🎳", values 1-5 for "🏀" and "⚽", and values 1-64 for "🎰". Defaults to "🎲" - Emoji string - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendDice Use this method to send an animated emoji that will display a random value. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - opts (type SendDiceOpts): All optional parameters. 
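
A minimal sketch of the SendContact implementation above, assuming a *gotgbot.Bot named b; the phone number, names, and vCard payload are placeholders:

func contactSketch(b *gotgbot.Bot, chatID int64) error {
	_, err := b.SendContact(chatID, "+15550100", "Ada", &gotgbot.SendContactOpts{
		LastName: "Lovelace",
		// Optional vCard payload, 0-2048 bytes per the field docs above.
		Vcard: "BEGIN:VCARD\nVERSION:3.0\nFN:Ada Lovelace\nEND:VCARD",
	})
	return err
}
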
-// https://core.telegram.org/bots/api#senddice
-func (bot *Bot) SendDice(chatId int64, opts *SendDiceOpts) (*Message, error) {
- v := map[string]string{}
- v["chat_id"] = strconv.FormatInt(chatId, 10)
- if opts != nil {
- if opts.MessageThreadId != 0 {
- v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10)
- }
- v["emoji"] = opts.Emoji
- v["disable_notification"] = strconv.FormatBool(opts.DisableNotification)
- v["protect_content"] = strconv.FormatBool(opts.ProtectContent)
- if opts.ReplyToMessageId != 0 {
- v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10)
- }
- v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply)
- if opts.ReplyMarkup != nil {
- bs, err := json.Marshal(opts.ReplyMarkup)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err)
- }
- v["reply_markup"] = string(bs)
- }
- }
-
- var reqOpts *RequestOpts
- if opts != nil {
- reqOpts = opts.RequestOpts
- }
-
- r, err := bot.Request("sendDice", v, nil, reqOpts)
- if err != nil {
- return nil, err
- }
-
- var m Message
- return &m, json.Unmarshal(r, &m)
-}
-
-// SendDocumentOpts is the set of optional fields for Bot.SendDocument.
-type SendDocumentOpts struct {
- // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only
- MessageThreadId int64
- // Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files
- Thumb InputFile
- // Document caption (may also be used when resending documents by file_id), 0-1024 characters after entities parsing
- Caption string
- // Mode for parsing entities in the document caption. See formatting options for more details.
- ParseMode string
- // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
- CaptionEntities []MessageEntity
- // Disables automatic server-side content type detection for files uploaded using multipart/form-data
- DisableContentTypeDetection bool
- // Sends the message silently. Users will receive a notification with no sound.
- DisableNotification bool
- // Protects the contents of the sent message from forwarding and saving
- ProtectContent bool
- // If the message is a reply, ID of the original message
- ReplyToMessageId int64
- // Pass True if the message should be sent even if the specified replied-to message is not found
- AllowSendingWithoutReply bool
- // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
- ReplyMarkup ReplyMarkup
- // RequestOpts are an additional optional field to configure timeouts for individual requests
- RequestOpts *RequestOpts
-}
-
-// SendDocument Use this method to send general files. On success, the sent Message is returned. Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future.
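
A sketch of the SendDice implementation above, assuming a *gotgbot.Bot named b and that the generated Message type exposes the rolled value via its Dice field, as it does elsewhere in this package:

func diceSketch(b *gotgbot.Bot, chatID int64) (int64, error) {
	// "🎯" animates a dart throw and yields a value in 1-6, per the Emoji field docs above.
	msg, err := b.SendDice(chatID, &gotgbot.SendDiceOpts{Emoji: "🎯"})
	if err != nil {
		return 0, err
	}
	return msg.Dice.Value, nil
}
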
-// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - document (type InputFile): File to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendDocumentOpts): All optional parameters. -// https://core.telegram.org/bots/api#senddocument -func (bot *Bot) SendDocument(chatId int64, document InputFile, opts *SendDocumentOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if document != nil { - switch m := document.(type) { - case string: - v["document"] = m - - case NamedReader: - v["document"] = "attach://document" - data["document"] = m - - case io.Reader: - v["document"] = "attach://document" - data["document"] = NamedFile{File: m} - - case []byte: - v["document"] = "attach://document" - data["document"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", document) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - if opts.Thumb != nil { - switch m := opts.Thumb.(type) { - case string: - v["thumb"] = m - - case NamedReader: - v["thumb"] = "attach://thumb" - data["thumb"] = m - - case io.Reader: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: m} - - case []byte: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb) - } - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - v["disable_content_type_detection"] = strconv.FormatBool(opts.DisableContentTypeDetection) - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendDocument", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendGameOpts is the set of optional fields for Bot.SendGame. -type SendGameOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Sends the message silently. Users will receive a notification with no sound. 
- DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // A JSON-serialized object for an inline keyboard. If empty, one 'Play game_title' button will be shown. If not empty, the first button must launch the game. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendGame Use this method to send a game. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat -// - gameShortName (type string): Short name of the game, serves as the unique identifier for the game. Set up your games via @BotFather. -// - opts (type SendGameOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendgame -func (bot *Bot) SendGame(chatId int64, gameShortName string, opts *SendGameOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["game_short_name"] = gameShortName - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendGame", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendInvoiceOpts is the set of optional fields for Bot.SendInvoice. -type SendInvoiceOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // The maximum accepted amount for tips in the smallest units of the currency (integer, not float/double). For example, for a maximum tip of US$ 1.45 pass max_tip_amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). Defaults to 0 - MaxTipAmount int64 - // A JSON-serialized array of suggested amounts of tips in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount. - SuggestedTipAmounts []int64 - // Unique deep-linking parameter. If left empty, forwarded copies of the sent message will have a Pay button, allowing multiple users to pay directly from the forwarded message, using the same invoice. 
If non-empty, forwarded copies of the sent message will have a URL button with a deep link to the bot (instead of a Pay button), with the value used as the start parameter - StartParameter string - // JSON-serialized data about the invoice, which will be shared with the payment provider. A detailed description of required fields should be provided by the payment provider. - ProviderData string - // URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service. People like it better when they see what they are paying for. - PhotoUrl string - // Photo size in bytes - PhotoSize int64 - // Photo width - PhotoWidth int64 - // Photo height - PhotoHeight int64 - // Pass True if you require the user's full name to complete the order - NeedName bool - // Pass True if you require the user's phone number to complete the order - NeedPhoneNumber bool - // Pass True if you require the user's email address to complete the order - NeedEmail bool - // Pass True if you require the user's shipping address to complete the order - NeedShippingAddress bool - // Pass True if the user's phone number should be sent to provider - SendPhoneNumberToProvider bool - // Pass True if the user's email address should be sent to provider - SendEmailToProvider bool - // Pass True if the final price depends on the shipping method - IsFlexible bool - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // A JSON-serialized object for an inline keyboard. If empty, one 'Pay total price' button will be shown. If not empty, the first button must be a Pay button. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendInvoice Use this method to send invoices. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - title (type string): Product name, 1-32 characters -// - description (type string): Product description, 1-255 characters -// - payload (type string): Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes. -// - providerToken (type string): Payment provider token, obtained via @BotFather -// - currency (type string): Three-letter ISO 4217 currency code, see more on currencies -// - prices (type []LabeledPrice): Price breakdown, a JSON-serialized list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.) -// - opts (type SendInvoiceOpts): All optional parameters. 
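
A sketch of a SendInvoice call using the options above, assuming a *gotgbot.Bot named b and a provider token issued by @BotFather; labels, payload, and amounts are placeholders (amounts are in the smallest currency unit, so 499 means US$ 4.99):

func invoiceSketch(b *gotgbot.Bot, chatID int64, providerToken string) error {
	prices := []gotgbot.LabeledPrice{
		{Label: "Widget", Amount: 499},
		{Label: "Shipping", Amount: 150},
	}
	_, err := b.SendInvoice(chatID, "Widget", "A fine example widget.", "order-1234",
		providerToken, "USD", prices, &gotgbot.SendInvoiceOpts{
			MaxTipAmount:        500,
			SuggestedTipAmounts: []int64{100, 250, 500}, // at most 4, strictly increasing, <= max_tip_amount
			NeedShippingAddress: true,
			IsFlexible:          true, // final price depends on the chosen shipping method
		})
	return err
}
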
-// https://core.telegram.org/bots/api#sendinvoice -func (bot *Bot) SendInvoice(chatId int64, title string, description string, payload string, providerToken string, currency string, prices []LabeledPrice, opts *SendInvoiceOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["title"] = title - v["description"] = description - v["payload"] = payload - v["provider_token"] = providerToken - v["currency"] = currency - if prices != nil { - bs, err := json.Marshal(prices) - if err != nil { - return nil, fmt.Errorf("failed to marshal field prices: %w", err) - } - v["prices"] = string(bs) - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - if opts.MaxTipAmount != 0 { - v["max_tip_amount"] = strconv.FormatInt(opts.MaxTipAmount, 10) - } - if opts.SuggestedTipAmounts != nil { - bs, err := json.Marshal(opts.SuggestedTipAmounts) - if err != nil { - return nil, fmt.Errorf("failed to marshal field suggested_tip_amounts: %w", err) - } - v["suggested_tip_amounts"] = string(bs) - } - v["start_parameter"] = opts.StartParameter - v["provider_data"] = opts.ProviderData - v["photo_url"] = opts.PhotoUrl - if opts.PhotoSize != 0 { - v["photo_size"] = strconv.FormatInt(opts.PhotoSize, 10) - } - if opts.PhotoWidth != 0 { - v["photo_width"] = strconv.FormatInt(opts.PhotoWidth, 10) - } - if opts.PhotoHeight != 0 { - v["photo_height"] = strconv.FormatInt(opts.PhotoHeight, 10) - } - v["need_name"] = strconv.FormatBool(opts.NeedName) - v["need_phone_number"] = strconv.FormatBool(opts.NeedPhoneNumber) - v["need_email"] = strconv.FormatBool(opts.NeedEmail) - v["need_shipping_address"] = strconv.FormatBool(opts.NeedShippingAddress) - v["send_phone_number_to_provider"] = strconv.FormatBool(opts.SendPhoneNumberToProvider) - v["send_email_to_provider"] = strconv.FormatBool(opts.SendEmailToProvider) - v["is_flexible"] = strconv.FormatBool(opts.IsFlexible) - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendInvoice", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendLocationOpts is the set of optional fields for Bot.SendLocation. -type SendLocationOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // The radius of uncertainty for the location, measured in meters; 0-1500 - HorizontalAccuracy float64 - // Period in seconds for which the location will be updated (see Live Locations, should be between 60 and 86400. - LivePeriod int64 - // For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. - Heading int64 - // For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. 
- ProximityAlertRadius int64 - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendLocation Use this method to send point on the map. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - latitude (type float64): Latitude of the location -// - longitude (type float64): Longitude of the location -// - opts (type SendLocationOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendlocation -func (bot *Bot) SendLocation(chatId int64, latitude float64, longitude float64, opts *SendLocationOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["latitude"] = strconv.FormatFloat(latitude, 'f', -1, 64) - v["longitude"] = strconv.FormatFloat(longitude, 'f', -1, 64) - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - if opts.HorizontalAccuracy != 0.0 { - v["horizontal_accuracy"] = strconv.FormatFloat(opts.HorizontalAccuracy, 'f', -1, 64) - } - if opts.LivePeriod != 0 { - v["live_period"] = strconv.FormatInt(opts.LivePeriod, 10) - } - if opts.Heading != 0 { - v["heading"] = strconv.FormatInt(opts.Heading, 10) - } - if opts.ProximityAlertRadius != 0 { - v["proximity_alert_radius"] = strconv.FormatInt(opts.ProximityAlertRadius, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendLocation", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendMediaGroupOpts is the set of optional fields for Bot.SendMediaGroup. -type SendMediaGroupOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Sends messages silently. Users will receive a notification with no sound. 
- DisableNotification bool - // Protects the contents of the sent messages from forwarding and saving - ProtectContent bool - // If the messages are a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendMediaGroup Use this method to send a group of photos, videos, documents or audios as an album. Documents and audio files can be only grouped in an album with messages of the same type. On success, an array of Messages that were sent is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - media (type []InputMedia): A JSON-serialized array describing messages to be sent, must include 2-10 items -// - opts (type SendMediaGroupOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendmediagroup -func (bot *Bot) SendMediaGroup(chatId int64, media []InputMedia, opts *SendMediaGroupOpts) ([]Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if media != nil { - var rawList []json.RawMessage - for idx, im := range media { - inputMediaBs, err := im.InputMediaParams("media"+strconv.Itoa(idx), data) - if err != nil { - return nil, fmt.Errorf("failed to marshal InputMedia list item %d for field media: %w", idx, err) - } - rawList = append(rawList, inputMediaBs) - } - bs, err := json.Marshal(rawList) - if err != nil { - return nil, fmt.Errorf("failed to marshal raw json list of InputMedia for field: media %w", err) - } - v["media"] = string(bs) - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendMediaGroup", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m []Message - return m, json.Unmarshal(r, &m) -} - -// SendMessageOpts is the set of optional fields for Bot.SendMessage. -type SendMessageOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Mode for parsing entities in the message text. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode - Entities []MessageEntity - // Disables link previews for links in this message - DisableWebPagePreview bool - // Sends the message silently. Users will receive a notification with no sound. 
- DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendMessage Use this method to send text messages. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - text (type string): Text of the message to be sent, 1-4096 characters after entities parsing -// - opts (type SendMessageOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendmessage -func (bot *Bot) SendMessage(chatId int64, text string, opts *SendMessageOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["text"] = text - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["parse_mode"] = opts.ParseMode - if opts.Entities != nil { - bs, err := json.Marshal(opts.Entities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field entities: %w", err) - } - v["entities"] = string(bs) - } - v["disable_web_page_preview"] = strconv.FormatBool(opts.DisableWebPagePreview) - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendMessage", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendPhotoOpts is the set of optional fields for Bot.SendPhoto. -type SendPhotoOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Photo caption (may also be used when resending photos by file_id), 0-1024 characters after entities parsing - Caption string - // Mode for parsing entities in the photo caption. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Sends the message silently. Users will receive a notification with no sound. 
- DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendPhoto Use this method to send photos. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - photo (type InputFile): Photo to send. Pass a file_id as String to send a photo that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a photo from the Internet, or upload a new photo using multipart/form-data. The photo must be at most 10 MB in size. The photo's width and height must not exceed 10000 in total. Width and height ratio must be at most 20. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendPhotoOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendphoto -func (bot *Bot) SendPhoto(chatId int64, photo InputFile, opts *SendPhotoOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if photo != nil { - switch m := photo.(type) { - case string: - v["photo"] = m - - case NamedReader: - v["photo"] = "attach://photo" - data["photo"] = m - - case io.Reader: - v["photo"] = "attach://photo" - data["photo"] = NamedFile{File: m} - - case []byte: - v["photo"] = "attach://photo" - data["photo"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", photo) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendPhoto", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendPollOpts is the set of optional fields for Bot.SendPoll. 
-type SendPollOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // True, if the poll needs to be anonymous, defaults to True - IsAnonymous bool - // Poll type, "quiz" or "regular", defaults to "regular" - Type string - // True, if the poll allows multiple answers, ignored for polls in quiz mode, defaults to False - AllowsMultipleAnswers bool - // 0-based identifier of the correct answer option, required for polls in quiz mode - CorrectOptionId int64 - // Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters with at most 2 line feeds after entities parsing - Explanation string - // Mode for parsing entities in the explanation. See formatting options for more details. - ExplanationParseMode string - // A JSON-serialized list of special entities that appear in the poll explanation, which can be specified instead of parse_mode - ExplanationEntities []MessageEntity - // Amount of time in seconds the poll will be active after creation, 5-600. Can't be used together with close_date. - OpenPeriod int64 - // Point in time (Unix timestamp) when the poll will be automatically closed. Must be at least 5 and no more than 600 seconds in the future. Can't be used together with open_period. - CloseDate int64 - // Pass True if the poll needs to be immediately closed. This can be useful for poll preview. - IsClosed bool - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendPoll Use this method to send a native poll. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - question (type string): Poll question, 1-300 characters -// - options (type []string): A JSON-serialized list of answer options, 2-10 strings 1-100 characters each -// - opts (type SendPollOpts): All optional parameters. 
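
A quiz-mode SendPoll sketch, assuming a *gotgbot.Bot named b. Note from the implementation that follows that correct_option_id is serialized whenever Type is "quiz", and that is_anonymous is always serialized once opts are passed, so it is set explicitly here to keep the Bot API default:

func quizSketch(b *gotgbot.Bot, chatID int64) error {
	options := []string{"Go", "Rust", "Haskell"}
	_, err := b.SendPoll(chatID, "Which language is this library written in?", options,
		&gotgbot.SendPollOpts{
			IsAnonymous:     true, // match the Bot API default explicitly
			Type:            "quiz",
			CorrectOptionId: 0, // 0-based index into options; required in quiz mode
			Explanation:     "gotgbot generates its API surface from the Bot API spec.",
		})
	return err
}
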
-// https://core.telegram.org/bots/api#sendpoll -func (bot *Bot) SendPoll(chatId int64, question string, options []string, opts *SendPollOpts) (*Message, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["question"] = question - if options != nil { - bs, err := json.Marshal(options) - if err != nil { - return nil, fmt.Errorf("failed to marshal field options: %w", err) - } - v["options"] = string(bs) - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["is_anonymous"] = strconv.FormatBool(opts.IsAnonymous) - v["type"] = opts.Type - v["allows_multiple_answers"] = strconv.FormatBool(opts.AllowsMultipleAnswers) - if opts.Type == "quiz" { - // correct_option_id should always be set when the type is "quiz" - it doesnt need to be set for type "regular". - v["correct_option_id"] = strconv.FormatInt(opts.CorrectOptionId, 10) - } - v["explanation"] = opts.Explanation - v["explanation_parse_mode"] = opts.ExplanationParseMode - if opts.ExplanationEntities != nil { - bs, err := json.Marshal(opts.ExplanationEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field explanation_entities: %w", err) - } - v["explanation_entities"] = string(bs) - } - if opts.OpenPeriod != 0 { - v["open_period"] = strconv.FormatInt(opts.OpenPeriod, 10) - } - if opts.CloseDate != 0 { - v["close_date"] = strconv.FormatInt(opts.CloseDate, 10) - } - v["is_closed"] = strconv.FormatBool(opts.IsClosed) - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendPoll", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendStickerOpts is the set of optional fields for Bot.SendSticker. -type SendStickerOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendSticker Use this method to send static .WEBP, animated .TGS, or video .WEBM stickers. On success, the sent Message is returned. 
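
A sketch showing the two sides of the InputFile type switch used throughout these methods, here with SendSticker: a string is passed through as a file_id or URL, while readers and byte slices are uploaded as multipart/form-data. Assumes a *gotgbot.Bot named b plus an "os" import; the path and file_id are placeholders:

func stickerSketch(b *gotgbot.Bot, chatID int64, fileID string) error {
	if fileID != "" {
		// String InputFile: no upload, Telegram resolves the existing file_id.
		_, err := b.SendSticker(chatID, fileID, nil)
		return err
	}
	// Reader InputFile: sent through the data map as "attach://sticker".
	f, err := os.Open("sticker.webp")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = b.SendSticker(chatID, f, nil)
	return err
}
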
-// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - sticker (type InputFile): Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .WEBP file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendStickerOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendsticker -func (bot *Bot) SendSticker(chatId int64, sticker InputFile, opts *SendStickerOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if sticker != nil { - switch m := sticker.(type) { - case string: - v["sticker"] = m - - case NamedReader: - v["sticker"] = "attach://sticker" - data["sticker"] = m - - case io.Reader: - v["sticker"] = "attach://sticker" - data["sticker"] = NamedFile{File: m} - - case []byte: - v["sticker"] = "attach://sticker" - data["sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", sticker) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendSticker", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendVenueOpts is the set of optional fields for Bot.SendVenue. -type SendVenueOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Foursquare identifier of the venue - FoursquareId string - // Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) - FoursquareType string - // Google Places identifier of the venue - GooglePlaceId string - // Google Places type of the venue. (See supported types.) - GooglePlaceType string - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
- ReplyMarkup ReplyMarkup
- // RequestOpts are an additional optional field to configure timeouts for individual requests
- RequestOpts *RequestOpts
-}
-
-// SendVenue Use this method to send information about a venue. On success, the sent Message is returned.
-// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername)
-// - latitude (type float64): Latitude of the venue
-// - longitude (type float64): Longitude of the venue
-// - title (type string): Name of the venue
-// - address (type string): Address of the venue
-// - opts (type SendVenueOpts): All optional parameters.
-// https://core.telegram.org/bots/api#sendvenue
-func (bot *Bot) SendVenue(chatId int64, latitude float64, longitude float64, title string, address string, opts *SendVenueOpts) (*Message, error) {
- v := map[string]string{}
- v["chat_id"] = strconv.FormatInt(chatId, 10)
- v["latitude"] = strconv.FormatFloat(latitude, 'f', -1, 64)
- v["longitude"] = strconv.FormatFloat(longitude, 'f', -1, 64)
- v["title"] = title
- v["address"] = address
- if opts != nil {
- if opts.MessageThreadId != 0 {
- v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10)
- }
- v["foursquare_id"] = opts.FoursquareId
- v["foursquare_type"] = opts.FoursquareType
- v["google_place_id"] = opts.GooglePlaceId
- v["google_place_type"] = opts.GooglePlaceType
- v["disable_notification"] = strconv.FormatBool(opts.DisableNotification)
- v["protect_content"] = strconv.FormatBool(opts.ProtectContent)
- if opts.ReplyToMessageId != 0 {
- v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10)
- }
- v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply)
- if opts.ReplyMarkup != nil {
- bs, err := json.Marshal(opts.ReplyMarkup)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err)
- }
- v["reply_markup"] = string(bs)
- }
- }
-
- var reqOpts *RequestOpts
- if opts != nil {
- reqOpts = opts.RequestOpts
- }
-
- r, err := bot.Request("sendVenue", v, nil, reqOpts)
- if err != nil {
- return nil, err
- }
-
- var m Message
- return &m, json.Unmarshal(r, &m)
-}
-
-// SendVideoOpts is the set of optional fields for Bot.SendVideo.
-type SendVideoOpts struct {
- // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only
- MessageThreadId int64
- // Duration of sent video in seconds
- Duration int64
- // Video width
- Width int64
- // Video height
- Height int64
- // Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files
- Thumb InputFile
- // Video caption (may also be used when resending videos by file_id), 0-1024 characters after entities parsing
- Caption string
- // Mode for parsing entities in the video caption. See formatting options for more details.
- ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Pass True if the uploaded video is suitable for streaming - SupportsStreaming bool - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendVideo Use this method to send video files, Telegram clients support MPEG4 videos (other formats may be sent as Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB in size, this limit may be changed in the future. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - video (type InputFile): Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendVideoOpts): All optional parameters. 
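
A sketch of SendVideo with a custom thumbnail, assuming a *gotgbot.Bot named b and an "os" import; per the Thumb field docs above, the thumbnail should be a JPEG under 200 kB with neither side exceeding 320 pixels. Paths are placeholders:

func videoSketch(b *gotgbot.Bot, chatID int64) error {
	vid, err := os.Open("clip.mp4")
	if err != nil {
		return err
	}
	defer vid.Close()
	thumb, err := os.Open("clip_thumb.jpg")
	if err != nil {
		return err
	}
	defer thumb.Close()
	_, err = b.SendVideo(chatID, vid, &gotgbot.SendVideoOpts{
		Thumb:             thumb, // uploaded alongside the video as "attach://thumb"
		SupportsStreaming: true,
	})
	return err
}
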
-// https://core.telegram.org/bots/api#sendvideo
-func (bot *Bot) SendVideo(chatId int64, video InputFile, opts *SendVideoOpts) (*Message, error) {
- v := map[string]string{}
- data := map[string]NamedReader{}
- v["chat_id"] = strconv.FormatInt(chatId, 10)
- if video != nil {
- switch m := video.(type) {
- case string:
- v["video"] = m
-
- case NamedReader:
- v["video"] = "attach://video"
- data["video"] = m
-
- case io.Reader:
- v["video"] = "attach://video"
- data["video"] = NamedFile{File: m}
-
- case []byte:
- v["video"] = "attach://video"
- data["video"] = NamedFile{File: bytes.NewReader(m)}
-
- default:
- return nil, fmt.Errorf("unknown type for InputFile: %T", video)
- }
- }
- if opts != nil {
- if opts.MessageThreadId != 0 {
- v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10)
- }
- if opts.Duration != 0 {
- v["duration"] = strconv.FormatInt(opts.Duration, 10)
- }
- if opts.Width != 0 {
- v["width"] = strconv.FormatInt(opts.Width, 10)
- }
- if opts.Height != 0 {
- v["height"] = strconv.FormatInt(opts.Height, 10)
- }
- if opts.Thumb != nil {
- switch m := opts.Thumb.(type) {
- case string:
- v["thumb"] = m
-
- case NamedReader:
- v["thumb"] = "attach://thumb"
- data["thumb"] = m
-
- case io.Reader:
- v["thumb"] = "attach://thumb"
- data["thumb"] = NamedFile{File: m}
-
- case []byte:
- v["thumb"] = "attach://thumb"
- data["thumb"] = NamedFile{File: bytes.NewReader(m)}
-
- default:
- return nil, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb)
- }
- }
- v["caption"] = opts.Caption
- v["parse_mode"] = opts.ParseMode
- if opts.CaptionEntities != nil {
- bs, err := json.Marshal(opts.CaptionEntities)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err)
- }
- v["caption_entities"] = string(bs)
- }
- v["supports_streaming"] = strconv.FormatBool(opts.SupportsStreaming)
- v["disable_notification"] = strconv.FormatBool(opts.DisableNotification)
- v["protect_content"] = strconv.FormatBool(opts.ProtectContent)
- if opts.ReplyToMessageId != 0 {
- v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10)
- }
- v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply)
- if opts.ReplyMarkup != nil {
- bs, err := json.Marshal(opts.ReplyMarkup)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err)
- }
- v["reply_markup"] = string(bs)
- }
- }
-
- var reqOpts *RequestOpts
- if opts != nil {
- reqOpts = opts.RequestOpts
- }
-
- r, err := bot.Request("sendVideo", v, data, reqOpts)
- if err != nil {
- return nil, err
- }
-
- var m Message
- return &m, json.Unmarshal(r, &m)
-}
-
-// SendVideoNoteOpts is the set of optional fields for Bot.SendVideoNote.
-type SendVideoNoteOpts struct {
- // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only
- MessageThreadId int64
- // Duration of sent video in seconds
- Duration int64
- // Video width and height, i.e. diameter of the video message
- Length int64
- // Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>.
More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb InputFile - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendVideoNote As of v.4.0, Telegram clients support rounded square MPEG4 videos of up to 1 minute long. Use this method to send video messages. On success, the sent Message is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - videoNote (type InputFile): Video note to send. Pass a file_id as String to send a video note that exists on the Telegram servers (recommended) or upload a new video using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files. Sending video notes by a URL is currently unsupported -// - opts (type SendVideoNoteOpts): All optional parameters. -// https://core.telegram.org/bots/api#sendvideonote -func (bot *Bot) SendVideoNote(chatId int64, videoNote InputFile, opts *SendVideoNoteOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if videoNote != nil { - switch m := videoNote.(type) { - case string: - v["video_note"] = m - - case NamedReader: - v["video_note"] = "attach://video_note" - data["video_note"] = m - - case io.Reader: - v["video_note"] = "attach://video_note" - data["video_note"] = NamedFile{File: m} - - case []byte: - v["video_note"] = "attach://video_note" - data["video_note"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", videoNote) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - if opts.Duration != 0 { - v["duration"] = strconv.FormatInt(opts.Duration, 10) - } - if opts.Length != 0 { - v["length"] = strconv.FormatInt(opts.Length, 10) - } - if opts.Thumb != nil { - switch m := opts.Thumb.(type) { - case string: - v["thumb"] = m - - case NamedReader: - v["thumb"] = "attach://thumb" - data["thumb"] = m - - case io.Reader: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: m} - - case []byte: - v["thumb"] = "attach://thumb" - data["thumb"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb) - } - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to 
marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendVideoNote", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SendVoiceOpts is the set of optional fields for Bot.SendVoice. -type SendVoiceOpts struct { - // Unique identifier for the target message thread (topic) of the forum; for forum supergroups only - MessageThreadId int64 - // Voice message caption, 0-1024 characters after entities parsing - Caption string - // Mode for parsing entities in the voice message caption. See formatting options for more details. - ParseMode string - // A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity - // Duration of the voice message in seconds - Duration int64 - // Sends the message silently. Users will receive a notification with no sound. - DisableNotification bool - // Protects the contents of the sent message from forwarding and saving - ProtectContent bool - // If the message is a reply, ID of the original message - ReplyToMessageId int64 - // Pass True if the message should be sent even if the specified replied-to message is not found - AllowSendingWithoutReply bool - // Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. - ReplyMarkup ReplyMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SendVoice Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .OGG file encoded with OPUS (other formats may be sent as Audio or Document). On success, the sent Message is returned. Bots can currently send voice messages of up to 50 MB in size, this limit may be changed in the future. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - voice (type InputFile): Audio file to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// - opts (type SendVoiceOpts): All optional parameters. 
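
A minimal SendVoice sketch, assuming a *gotgbot.Bot named b and an "os" import; as the summary below notes, the file must be OGG-encoded OPUS to render as a playable voice message. The path and duration are placeholders:

func voiceSketch(b *gotgbot.Bot, chatID int64) error {
	f, err := os.Open("note.ogg")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = b.SendVoice(chatID, f, &gotgbot.SendVoiceOpts{Duration: 12})
	return err
}
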
-// https://core.telegram.org/bots/api#sendvoice -func (bot *Bot) SendVoice(chatId int64, voice InputFile, opts *SendVoiceOpts) (*Message, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if voice != nil { - switch m := voice.(type) { - case string: - v["voice"] = m - - case NamedReader: - v["voice"] = "attach://voice" - data["voice"] = m - - case io.Reader: - v["voice"] = "attach://voice" - data["voice"] = NamedFile{File: m} - - case []byte: - v["voice"] = "attach://voice" - data["voice"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", voice) - } - } - if opts != nil { - if opts.MessageThreadId != 0 { - v["message_thread_id"] = strconv.FormatInt(opts.MessageThreadId, 10) - } - v["caption"] = opts.Caption - v["parse_mode"] = opts.ParseMode - if opts.CaptionEntities != nil { - bs, err := json.Marshal(opts.CaptionEntities) - if err != nil { - return nil, fmt.Errorf("failed to marshal field caption_entities: %w", err) - } - v["caption_entities"] = string(bs) - } - if opts.Duration != 0 { - v["duration"] = strconv.FormatInt(opts.Duration, 10) - } - v["disable_notification"] = strconv.FormatBool(opts.DisableNotification) - v["protect_content"] = strconv.FormatBool(opts.ProtectContent) - if opts.ReplyToMessageId != 0 { - v["reply_to_message_id"] = strconv.FormatInt(opts.ReplyToMessageId, 10) - } - v["allow_sending_without_reply"] = strconv.FormatBool(opts.AllowSendingWithoutReply) - if opts.ReplyMarkup != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("sendVoice", v, data, reqOpts) - if err != nil { - return nil, err - } - - var m Message - return &m, json.Unmarshal(r, &m) -} - -// SetChatAdministratorCustomTitleOpts is the set of optional fields for Bot.SetChatAdministratorCustomTitle. -type SetChatAdministratorCustomTitleOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatAdministratorCustomTitle Use this method to set a custom title for an administrator in a supergroup promoted by the bot. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - userId (type int64): Unique identifier of the target user -// - customTitle (type string): New custom title for the administrator; 0-16 characters, emoji are not allowed -// https://core.telegram.org/bots/api#setchatadministratorcustomtitle -func (bot *Bot) SetChatAdministratorCustomTitle(chatId int64, userId int64, customTitle string, opts *SetChatAdministratorCustomTitleOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - v["custom_title"] = customTitle - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatAdministratorCustomTitle", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatDescriptionOpts is the set of optional fields for Bot.SetChatDescription. 
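Every sender above funnels its InputFile argument through the same type switch: a string is passed through as a file_id or URL, a NamedReader is attached as multipart data, a bare io.Reader is wrapped in NamedFile, and a []byte is wrapped in a bytes.Reader. A minimal sketch of the two common forms using SendVoice, assuming a *gotgbot.Bot value b and imports of os and the gotgbot package; the path and identifiers are placeholders:

    func sendVoiceBothWays(b *gotgbot.Bot, chatId int64, knownFileId string) error {
        // Cheapest form: reuse a file that already lives on Telegram's servers.
        if _, err := b.SendVoice(chatId, knownFileId, nil); err != nil {
            return err
        }
        // Upload form: any io.Reader is accepted and sent via multipart/form-data.
        f, err := os.Open("reply.ogg") // placeholder path; must be OGG/OPUS
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = b.SendVoice(chatId, f, &gotgbot.SendVoiceOpts{
            Caption: "weekly summary",
        })
        return err
    }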
-type SetChatDescriptionOpts struct { - // New chat description, 0-255 characters - Description string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatDescription Use this method to change the description of a group, a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - opts (type SetChatDescriptionOpts): All optional parameters. -// https://core.telegram.org/bots/api#setchatdescription -func (bot *Bot) SetChatDescription(chatId int64, opts *SetChatDescriptionOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if opts != nil { - v["description"] = opts.Description - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatDescription", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatMenuButtonOpts is the set of optional fields for Bot.SetChatMenuButton. -type SetChatMenuButtonOpts struct { - // Unique identifier for the target private chat. If not specified, default bot's menu button will be changed - ChatId int64 - // A JSON-serialized object for the bot's new menu button. Defaults to MenuButtonDefault - MenuButton MenuButton - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatMenuButton Use this method to change the bot's menu button in a private chat, or the default menu button. Returns True on success. -// - opts (type SetChatMenuButtonOpts): All optional parameters. -// https://core.telegram.org/bots/api#setchatmenubutton -func (bot *Bot) SetChatMenuButton(opts *SetChatMenuButtonOpts) (bool, error) { - v := map[string]string{} - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - bs, err := json.Marshal(opts.MenuButton) - if err != nil { - return false, fmt.Errorf("failed to marshal field menu_button: %w", err) - } - v["menu_button"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatMenuButton", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatPermissionsOpts is the set of optional fields for Bot.SetChatPermissions. -type SetChatPermissionsOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatPermissions Use this method to set default chat permissions for all members. The bot must be an administrator in the group or a supergroup for this to work and must have the can_restrict_members administrator rights. Returns True on success. 
-// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - permissions (type ChatPermissions): A JSON-serialized object for new default chat permissions -// https://core.telegram.org/bots/api#setchatpermissions -func (bot *Bot) SetChatPermissions(chatId int64, permissions ChatPermissions, opts *SetChatPermissionsOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - bs, err := json.Marshal(permissions) - if err != nil { - return false, fmt.Errorf("failed to marshal field permissions: %w", err) - } - v["permissions"] = string(bs) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatPermissions", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatPhotoOpts is the set of optional fields for Bot.SetChatPhoto. -type SetChatPhotoOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatPhoto Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - photo (type InputFile): New chat photo, uploaded using multipart/form-data -// https://core.telegram.org/bots/api#setchatphoto -func (bot *Bot) SetChatPhoto(chatId int64, photo InputFile, opts *SetChatPhotoOpts) (bool, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if photo != nil { - switch m := photo.(type) { - case NamedReader: - v["photo"] = "attach://photo" - data["photo"] = m - - case io.Reader: - v["photo"] = "attach://photo" - data["photo"] = NamedFile{File: m} - - case []byte: - v["photo"] = "attach://photo" - data["photo"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", photo) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatPhoto", v, data, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatStickerSetOpts is the set of optional fields for Bot.SetChatStickerSet. -type SetChatStickerSetOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatStickerSet Use this method to set a new group sticker set for a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Returns True on success. 
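The chat-management helpers in this stretch compose naturally for one-off group setup. A sketch chaining SetChatDescription and SetChatPermissions, assuming a *gotgbot.Bot value b; the ChatPermissions field names are taken from this library's gen_types.go and the values are placeholders:

    func lockDownGroup(b *gotgbot.Bot, chatId int64) error {
        if _, err := b.SetChatDescription(chatId, &gotgbot.SetChatDescriptionOpts{
            Description: "Read the pinned rules before posting", // 0-255 characters
        }); err != nil {
            return err
        }
        // Needs the can_restrict_members administrator right, per the docs above.
        _, err := b.SetChatPermissions(chatId, gotgbot.ChatPermissions{
            CanSendMessages: true, // anything not set here stays forbidden
        }, nil)
        return err
    }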
-// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - stickerSetName (type string): Name of the sticker set to be set as the group sticker set -// https://core.telegram.org/bots/api#setchatstickerset -func (bot *Bot) SetChatStickerSet(chatId int64, stickerSetName string, opts *SetChatStickerSetOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["sticker_set_name"] = stickerSetName - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatStickerSet", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetChatTitleOpts is the set of optional fields for Bot.SetChatTitle. -type SetChatTitleOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetChatTitle Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - title (type string): New chat title, 1-128 characters -// https://core.telegram.org/bots/api#setchattitle -func (bot *Bot) SetChatTitle(chatId int64, title string, opts *SetChatTitleOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["title"] = title - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setChatTitle", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetGameScoreOpts is the set of optional fields for Bot.SetGameScore. -type SetGameScoreOpts struct { - // Pass True if the high score is allowed to decrease. This can be useful when fixing mistakes or banning cheaters - Force bool - // Pass True if the game message should not be automatically edited to include the current scoreboard - DisableEditMessage bool - // Required if inline_message_id is not specified. Unique identifier for the target chat - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the sent message - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetGameScore Use this method to set the score of the specified user in a game message. On success, if the message is not an inline message, the Message is returned, otherwise True is returned. Returns an error, if the new score is not greater than the user's current score in the chat and force is False. -// - userId (type int64): User identifier -// - score (type int64): New score, must be non-negative -// - opts (type SetGameScoreOpts): All optional parameters. 
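Because setGameScore may target either a chat message or an inline message, the generated wrapper below returns both a *Message and a bool; the Message is non-nil for regular messages, while inline edits only yield True. A usage sketch, assuming a *gotgbot.Bot value b, the log import, and the Message type's MessageId field:

    func recordHighScore(b *gotgbot.Bot, userId, chatId, messageId, score int64) error {
        msg, ok, err := b.SetGameScore(userId, score, &gotgbot.SetGameScoreOpts{
            ChatId:    chatId,
            MessageId: messageId,
        })
        if err != nil {
            return err // also covers "new score not greater and force is False"
        }
        if msg != nil {
            log.Printf("scoreboard edited in message %d", msg.MessageId)
        } else {
            log.Printf("inline scoreboard edited: %t", ok)
        }
        return nil
    }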
-// https://core.telegram.org/bots/api#setgamescore -func (bot *Bot) SetGameScore(userId int64, score int64, opts *SetGameScoreOpts) (*Message, bool, error) { - v := map[string]string{} - v["user_id"] = strconv.FormatInt(userId, 10) - v["score"] = strconv.FormatInt(score, 10) - if opts != nil { - v["force"] = strconv.FormatBool(opts.Force) - v["disable_edit_message"] = strconv.FormatBool(opts.DisableEditMessage) - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setGameScore", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// SetMyCommandsOpts is the set of optional fields for Bot.SetMyCommands. -type SetMyCommandsOpts struct { - // A JSON-serialized object, describing scope of users for which the commands are relevant. Defaults to BotCommandScopeDefault. - Scope BotCommandScope - // A two-letter ISO 639-1 language code. If empty, commands will be applied to all users from the given scope, for whose language there are no dedicated commands - LanguageCode string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetMyCommands Use this method to change the list of the bot's commands. See this manual for more details about bot commands. Returns True on success. -// - commands (type []BotCommand): A JSON-serialized list of bot commands to be set as the list of the bot's commands. At most 100 commands can be specified. -// - opts (type SetMyCommandsOpts): All optional parameters. -// https://core.telegram.org/bots/api#setmycommands -func (bot *Bot) SetMyCommands(commands []BotCommand, opts *SetMyCommandsOpts) (bool, error) { - v := map[string]string{} - if commands != nil { - bs, err := json.Marshal(commands) - if err != nil { - return false, fmt.Errorf("failed to marshal field commands: %w", err) - } - v["commands"] = string(bs) - } - if opts != nil { - bs, err := json.Marshal(opts.Scope) - if err != nil { - return false, fmt.Errorf("failed to marshal field scope: %w", err) - } - v["scope"] = string(bs) - v["language_code"] = opts.LanguageCode - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setMyCommands", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// SetMyDefaultAdministratorRightsOpts is the set of optional fields for Bot.SetMyDefaultAdministratorRights. -type SetMyDefaultAdministratorRightsOpts struct { - // A JSON-serialized object describing new default administrator rights. If not specified, the default administrator rights will be cleared. - Rights ChatAdministratorRights - // Pass True to change the default administrator rights of the bot in channels. Otherwise, the default administrator rights of the bot for groups and supergroups will be changed. 
-	ForChannels bool
-	// RequestOpts are an additional optional field to configure timeouts for individual requests
-	RequestOpts *RequestOpts
-}
-
-// SetMyDefaultAdministratorRights Use this method to change the default administrator rights requested by the bot when it's added as an administrator to groups or channels. These rights will be suggested to users, but they are free to modify the list before adding the bot. Returns True on success.
-// - opts (type SetMyDefaultAdministratorRightsOpts): All optional parameters.
-// https://core.telegram.org/bots/api#setmydefaultadministratorrights
-func (bot *Bot) SetMyDefaultAdministratorRights(opts *SetMyDefaultAdministratorRightsOpts) (bool, error) {
-	v := map[string]string{}
-	if opts != nil {
-		bs, err := json.Marshal(opts.Rights)
-		if err != nil {
-			return false, fmt.Errorf("failed to marshal field rights: %w", err)
-		}
-		v["rights"] = string(bs)
-		v["for_channels"] = strconv.FormatBool(opts.ForChannels)
-	}
-
-	var reqOpts *RequestOpts
-	if opts != nil {
-		reqOpts = opts.RequestOpts
-	}
-
-	r, err := bot.Request("setMyDefaultAdministratorRights", v, nil, reqOpts)
-	if err != nil {
-		return false, err
-	}
-
-	var b bool
-	return b, json.Unmarshal(r, &b)
-}
-
-// SetPassportDataErrorsOpts is the set of optional fields for Bot.SetPassportDataErrors.
-type SetPassportDataErrorsOpts struct {
-	// RequestOpts are an additional optional field to configure timeouts for individual requests
-	RequestOpts *RequestOpts
-}
-
-// SetPassportDataErrors Informs a user that some of the Telegram Passport elements they provided contain errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). Returns True on success.
-// Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birth date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues.
-// - userId (type int64): User identifier
-// - errors (type []PassportElementError): A JSON-serialized array describing the errors
-// https://core.telegram.org/bots/api#setpassportdataerrors
-func (bot *Bot) SetPassportDataErrors(userId int64, errors []PassportElementError, opts *SetPassportDataErrorsOpts) (bool, error) {
-	v := map[string]string{}
-	v["user_id"] = strconv.FormatInt(userId, 10)
-	if errors != nil {
-		bs, err := json.Marshal(errors)
-		if err != nil {
-			return false, fmt.Errorf("failed to marshal field errors: %w", err)
-		}
-		v["errors"] = string(bs)
-	}
-
-	var reqOpts *RequestOpts
-	if opts != nil {
-		reqOpts = opts.RequestOpts
-	}
-
-	r, err := bot.Request("setPassportDataErrors", v, nil, reqOpts)
-	if err != nil {
-		return false, err
-	}
-
-	var b bool
-	return b, json.Unmarshal(r, &b)
-}
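SetMyCommands and SetMyDefaultAdministratorRights are typically called once at startup rather than per update. A sketch registering both, assuming a *gotgbot.Bot value b; the command list and rights are placeholders:

    func registerBotDefaults(b *gotgbot.Bot) error {
        if _, err := b.SetMyCommands([]gotgbot.BotCommand{
            {Command: "start", Description: "Start a conversation"},
            {Command: "help", Description: "Show usage information"},
        }, nil); err != nil {
            return err
        }
        _, err := b.SetMyDefaultAdministratorRights(&gotgbot.SetMyDefaultAdministratorRightsOpts{
            Rights: gotgbot.ChatAdministratorRights{
                CanManageChat:     true,
                CanDeleteMessages: true,
                CanInviteUsers:    true,
            },
        })
        return err
    }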
-
-// SetStickerPositionInSetOpts is the set of optional fields for Bot.SetStickerPositionInSet.
-type SetStickerPositionInSetOpts struct {
-	// RequestOpts are an additional optional field to configure timeouts for individual requests
-	RequestOpts *RequestOpts
-}
-
-// SetStickerPositionInSet Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success.
-// - sticker (type string): File identifier of the sticker
-// - position (type int64): New sticker position in the set, zero-based
-// https://core.telegram.org/bots/api#setstickerpositioninset
-func (bot *Bot) SetStickerPositionInSet(sticker string, position int64, opts *SetStickerPositionInSetOpts) (bool, error) {
-	v := map[string]string{}
-	v["sticker"] = sticker
-	v["position"] = strconv.FormatInt(position, 10)
-
-	var reqOpts *RequestOpts
-	if opts != nil {
-		reqOpts = opts.RequestOpts
-	}
-
-	r, err := bot.Request("setStickerPositionInSet", v, nil, reqOpts)
-	if err != nil {
-		return false, err
-	}
-
-	var b bool
-	return b, json.Unmarshal(r, &b)
-}
-
-// SetStickerSetThumbOpts is the set of optional fields for Bot.SetStickerSetThumb.
-type SetStickerSetThumbOpts struct {
-	// A PNG image with the thumbnail, must be up to 128 kilobytes in size and have width and height exactly 100px, or a TGS animation with the thumbnail up to 32 kilobytes in size; see https://core.telegram.org/stickers#animated-sticker-requirements for animated sticker technical requirements, or a WEBM video with the thumbnail up to 32 kilobytes in size; see https://core.telegram.org/stickers#video-sticker-requirements for video sticker technical requirements. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More information on Sending Files: https://core.telegram.org/bots/api#sending-files. Animated sticker set thumbnails can't be uploaded via HTTP URL.
-	Thumb InputFile
-	// RequestOpts are an additional optional field to configure timeouts for individual requests
-	RequestOpts *RequestOpts
-}
-
-// SetStickerSetThumb Use this method to set the thumbnail of a sticker set. Animated thumbnails can be set for animated sticker sets only. Video thumbnails can be set for video sticker sets only. Returns True on success.
-// - name (type string): Sticker set name
-// - userId (type int64): User identifier of the sticker set owner
-// - opts (type SetStickerSetThumbOpts): All optional parameters.
-// https://core.telegram.org/bots/api#setstickersetthumb
-func (bot *Bot) SetStickerSetThumb(name string, userId int64, opts *SetStickerSetThumbOpts) (bool, error) {
-	v := map[string]string{}
-	data := map[string]NamedReader{}
-	v["name"] = name
-	v["user_id"] = strconv.FormatInt(userId, 10)
-	if opts != nil {
-		if opts.Thumb != nil {
-			switch m := opts.Thumb.(type) {
-			case string:
-				v["thumb"] = m
-
-			case NamedReader:
-				v["thumb"] = "attach://thumb"
-				data["thumb"] = m
-
-			case io.Reader:
-				v["thumb"] = "attach://thumb"
-				data["thumb"] = NamedFile{File: m}
-
-			case []byte:
-				v["thumb"] = "attach://thumb"
-				data["thumb"] = NamedFile{File: bytes.NewReader(m)}
-
-			default:
-				return false, fmt.Errorf("unknown type for InputFile: %T", opts.Thumb)
-			}
-		}
-	}
-
-	var reqOpts *RequestOpts
-	if opts != nil {
-		reqOpts = opts.RequestOpts
-	}
-
-	r, err := bot.Request("setStickerSetThumb", v, data, reqOpts)
-	if err != nil {
-		return false, err
-	}
-
-	var b bool
-	return b, json.Unmarshal(r, &b)
-}
-
-// SetWebhookOpts is the set of optional fields for Bot.SetWebhook.
-type SetWebhookOpts struct {
-	// Upload your public key certificate so that the root certificate in use can be checked. See our self-signed guide for details.
- Certificate InputFile - // The fixed IP address which will be used to send webhook requests instead of the IP address resolved through DNS - IpAddress string - // The maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower values to limit the load on your bot's server, and higher values to increase your bot's throughput. - MaxConnections int64 - // A JSON-serialized list of the update types you want your bot to receive. For example, specify ["message", "edited_channel_post", "callback_query"] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all update types except chat_member (default). If not specified, the previous setting will be used. Please note that this parameter doesn't affect updates created before the call to the setWebhook, so unwanted updates may be received for a short period of time. - AllowedUpdates []string - // Pass True to drop all pending updates - DropPendingUpdates bool - // A secret token to be sent in a header "X-Telegram-Bot-Api-Secret-Token" in every webhook request, 1-256 characters. Only characters A-Z, a-z, 0-9, _ and - are allowed. The header is useful to ensure that the request comes from a webhook set by you. - SecretToken string - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// SetWebhook Use this method to specify a URL and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified URL, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. Returns True on success. -// If you'd like to make sure that the webhook was set by you, you can specify secret data in the parameter secret_token. If specified, the request will contain a header "X-Telegram-Bot-Api-Secret-Token" with the secret token as content. -// - url (type string): HTTPS URL to send updates to. Use an empty string to remove webhook integration -// - opts (type SetWebhookOpts): All optional parameters. 
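A sketch of a typical SetWebhook call exercising the optional fields above, assuming a *gotgbot.Bot value b and imports of fmt and os; the URL and secret are placeholders:

    func enableWebhook(b *gotgbot.Bot) error {
        ok, err := b.SetWebhook("https://bot.example.com/updates", &gotgbot.SetWebhookOpts{
            MaxConnections:     40,
            AllowedUpdates:     []string{"message", "callback_query"},
            DropPendingUpdates: true,
            // Echoed back by Telegram in the X-Telegram-Bot-Api-Secret-Token header.
            SecretToken: os.Getenv("WEBHOOK_SECRET"),
        })
        if err != nil {
            return err
        }
        if !ok {
            return fmt.Errorf("setWebhook unexpectedly returned false")
        }
        return nil
    }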
-// https://core.telegram.org/bots/api#setwebhook -func (bot *Bot) SetWebhook(url string, opts *SetWebhookOpts) (bool, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["url"] = url - if opts != nil { - if opts.Certificate != nil { - switch m := opts.Certificate.(type) { - case NamedReader: - v["certificate"] = "attach://certificate" - data["certificate"] = m - - case io.Reader: - v["certificate"] = "attach://certificate" - data["certificate"] = NamedFile{File: m} - - case []byte: - v["certificate"] = "attach://certificate" - data["certificate"] = NamedFile{File: bytes.NewReader(m)} - - default: - return false, fmt.Errorf("unknown type for InputFile: %T", opts.Certificate) - } - } - v["ip_address"] = opts.IpAddress - if opts.MaxConnections != 0 { - v["max_connections"] = strconv.FormatInt(opts.MaxConnections, 10) - } - if opts.AllowedUpdates != nil { - bs, err := json.Marshal(opts.AllowedUpdates) - if err != nil { - return false, fmt.Errorf("failed to marshal field allowed_updates: %w", err) - } - v["allowed_updates"] = string(bs) - } - v["drop_pending_updates"] = strconv.FormatBool(opts.DropPendingUpdates) - v["secret_token"] = opts.SecretToken - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("setWebhook", v, data, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// StopMessageLiveLocationOpts is the set of optional fields for Bot.StopMessageLiveLocation. -type StopMessageLiveLocationOpts struct { - // Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) - ChatId int64 - // Required if inline_message_id is not specified. Identifier of the message with live location to stop - MessageId int64 - // Required if chat_id and message_id are not specified. Identifier of the inline message - InlineMessageId string - // A JSON-serialized object for a new inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// StopMessageLiveLocation Use this method to stop updating a live location message before live_period expires. On success, if the message is not an inline message, the edited Message is returned, otherwise True is returned. -// - opts (type StopMessageLiveLocationOpts): All optional parameters. 
-// https://core.telegram.org/bots/api#stopmessagelivelocation -func (bot *Bot) StopMessageLiveLocation(opts *StopMessageLiveLocationOpts) (*Message, bool, error) { - v := map[string]string{} - if opts != nil { - if opts.ChatId != 0 { - v["chat_id"] = strconv.FormatInt(opts.ChatId, 10) - } - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - v["inline_message_id"] = opts.InlineMessageId - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("stopMessageLiveLocation", v, nil, reqOpts) - if err != nil { - return nil, false, err - } - - var m Message - if err := json.Unmarshal(r, &m); err != nil { - var b bool - if err := json.Unmarshal(r, &b); err != nil { - return nil, false, err - } - return nil, b, nil - } - return &m, true, nil - -} - -// StopPollOpts is the set of optional fields for Bot.StopPoll. -type StopPollOpts struct { - // A JSON-serialized object for a new message inline keyboard. - ReplyMarkup InlineKeyboardMarkup - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// StopPoll Use this method to stop a poll which was sent by the bot. On success, the stopped Poll is returned. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - messageId (type int64): Identifier of the original message with the poll -// - opts (type StopPollOpts): All optional parameters. -// https://core.telegram.org/bots/api#stoppoll -func (bot *Bot) StopPoll(chatId int64, messageId int64, opts *StopPollOpts) (*Poll, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_id"] = strconv.FormatInt(messageId, 10) - if opts != nil { - bs, err := json.Marshal(opts.ReplyMarkup) - if err != nil { - return nil, fmt.Errorf("failed to marshal field reply_markup: %w", err) - } - v["reply_markup"] = string(bs) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("stopPoll", v, nil, reqOpts) - if err != nil { - return nil, err - } - - var p Poll - return &p, json.Unmarshal(r, &p) -} - -// UnbanChatMemberOpts is the set of optional fields for Bot.UnbanChatMember. -type UnbanChatMemberOpts struct { - // Do nothing if the user is not banned - OnlyIfBanned bool - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UnbanChatMember Use this method to unban a previously banned user in a supergroup or channel. The user will not return to the group or channel automatically, but will be able to join via link, etc. The bot must be an administrator for this to work. By default, this method guarantees that after the call the user is not a member of the chat, but will be able to join it. So if the user is a member of the chat they will also be removed from the chat. If you don't want this, use the parameter only_if_banned. Returns True on success. -// - chatId (type int64): Unique identifier for the target group or username of the target supergroup or channel (in the format @channelusername) -// - userId (type int64): Unique identifier of the target user -// - opts (type UnbanChatMemberOpts): All optional parameters. 
-// https://core.telegram.org/bots/api#unbanchatmember -func (bot *Bot) UnbanChatMember(chatId int64, userId int64, opts *UnbanChatMemberOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["user_id"] = strconv.FormatInt(userId, 10) - if opts != nil { - v["only_if_banned"] = strconv.FormatBool(opts.OnlyIfBanned) - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("unbanChatMember", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// UnbanChatSenderChatOpts is the set of optional fields for Bot.UnbanChatSenderChat. -type UnbanChatSenderChatOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UnbanChatSenderChat Use this method to unban a previously banned channel chat in a supergroup or channel. The bot must be an administrator for this to work and must have the appropriate administrator rights. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - senderChatId (type int64): Unique identifier of the target sender chat -// https://core.telegram.org/bots/api#unbanchatsenderchat -func (bot *Bot) UnbanChatSenderChat(chatId int64, senderChatId int64, opts *UnbanChatSenderChatOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["sender_chat_id"] = strconv.FormatInt(senderChatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("unbanChatSenderChat", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// UnpinAllChatMessagesOpts is the set of optional fields for Bot.UnpinAllChatMessages. -type UnpinAllChatMessagesOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UnpinAllChatMessages Use this method to clear the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// https://core.telegram.org/bots/api#unpinallchatmessages -func (bot *Bot) UnpinAllChatMessages(chatId int64, opts *UnpinAllChatMessagesOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("unpinAllChatMessages", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// UnpinAllForumTopicMessagesOpts is the set of optional fields for Bot.UnpinAllForumTopicMessages. -type UnpinAllForumTopicMessagesOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UnpinAllForumTopicMessages Use this method to clear the list of pinned messages in a forum topic. 
The bot must be an administrator in the chat for this to work and must have the can_pin_messages administrator right in the supergroup. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) -// - messageThreadId (type int64): Unique identifier for the target message thread of the forum topic -// https://core.telegram.org/bots/api#unpinallforumtopicmessages -func (bot *Bot) UnpinAllForumTopicMessages(chatId int64, messageThreadId int64, opts *UnpinAllForumTopicMessagesOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - v["message_thread_id"] = strconv.FormatInt(messageThreadId, 10) - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("unpinAllForumTopicMessages", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// UnpinChatMessageOpts is the set of optional fields for Bot.UnpinChatMessage. -type UnpinChatMessageOpts struct { - // Identifier of a message to unpin. If not specified, the most recent pinned message (by sending date) will be unpinned. - MessageId int64 - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UnpinChatMessage Use this method to remove a message from the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. -// - chatId (type int64): Unique identifier for the target chat or username of the target channel (in the format @channelusername) -// - opts (type UnpinChatMessageOpts): All optional parameters. -// https://core.telegram.org/bots/api#unpinchatmessage -func (bot *Bot) UnpinChatMessage(chatId int64, opts *UnpinChatMessageOpts) (bool, error) { - v := map[string]string{} - v["chat_id"] = strconv.FormatInt(chatId, 10) - if opts != nil { - if opts.MessageId != 0 { - v["message_id"] = strconv.FormatInt(opts.MessageId, 10) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("unpinChatMessage", v, nil, reqOpts) - if err != nil { - return false, err - } - - var b bool - return b, json.Unmarshal(r, &b) -} - -// UploadStickerFileOpts is the set of optional fields for Bot.UploadStickerFile. -type UploadStickerFileOpts struct { - // RequestOpts are an additional optional field to configure timeouts for individual requests - RequestOpts *RequestOpts -} - -// UploadStickerFile Use this method to upload a .PNG file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success. -// - userId (type int64): User identifier of sticker file owner -// - pngSticker (type InputFile): PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. 
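UploadStickerFile pairs with the []byte branch of the InputFile handling: read a PNG into memory, upload it once, then reuse the returned file_id in createNewStickerSet or addStickerToSet. A sketch, assuming a *gotgbot.Bot value b, the os import, and a FileId field on the generated File type:

    func uploadStickerPng(b *gotgbot.Bot, userId int64, path string) (string, error) {
        png, err := os.ReadFile(path) // <=512KB, one side exactly 512px
        if err != nil {
            return "", err
        }
        f, err := b.UploadStickerFile(userId, png, nil) // []byte InputFile form
        if err != nil {
            return "", err
        }
        return f.FileId, nil // reusable when building sticker sets
    }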
More information on Sending Files: https://core.telegram.org/bots/api#sending-files -// https://core.telegram.org/bots/api#uploadstickerfile -func (bot *Bot) UploadStickerFile(userId int64, pngSticker InputFile, opts *UploadStickerFileOpts) (*File, error) { - v := map[string]string{} - data := map[string]NamedReader{} - v["user_id"] = strconv.FormatInt(userId, 10) - if pngSticker != nil { - switch m := pngSticker.(type) { - case NamedReader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = m - - case io.Reader: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: m} - - case []byte: - v["png_sticker"] = "attach://png_sticker" - data["png_sticker"] = NamedFile{File: bytes.NewReader(m)} - - default: - return nil, fmt.Errorf("unknown type for InputFile: %T", pngSticker) - } - } - - var reqOpts *RequestOpts - if opts != nil { - reqOpts = opts.RequestOpts - } - - r, err := bot.Request("uploadStickerFile", v, data, reqOpts) - if err != nil { - return nil, err - } - - var f File - return &f, json.Unmarshal(r, &f) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_types.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_types.go deleted file mode 100644 index 6fcb1af..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/gen_types.go +++ /dev/null @@ -1,4962 +0,0 @@ -// THIS FILE IS AUTOGENERATED. DO NOT EDIT. -// Regen by running 'go generate' in the repo root. - -package gotgbot - -import ( - "encoding/json" - "fmt" - "io" -) - -type ReplyMarkup interface { - replyMarkup() -} - -// Animation This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound). -// https://core.telegram.org/bots/api#animation -type Animation struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Video width as defined by sender - Width int64 `json:"width"` - // Video height as defined by sender - Height int64 `json:"height"` - // Duration of the video in seconds as defined by sender - Duration int64 `json:"duration"` - // Optional. Animation thumbnail as defined by sender - Thumb *PhotoSize `json:"thumb,omitempty"` - // Optional. Original animation filename as defined by sender - FileName string `json:"file_name,omitempty"` - // Optional. MIME type of the file as defined by sender - MimeType string `json:"mime_type,omitempty"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. - FileSize int64 `json:"file_size,omitempty"` -} - -// Audio This object represents an audio file to be treated as music by the Telegram clients. -// https://core.telegram.org/bots/api#audio -type Audio struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Duration of the audio in seconds as defined by sender - Duration int64 `json:"duration"` - // Optional. 
Performer of the audio as defined by sender or by audio tags - Performer string `json:"performer,omitempty"` - // Optional. Title of the audio as defined by sender or by audio tags - Title string `json:"title,omitempty"` - // Optional. Original filename as defined by sender - FileName string `json:"file_name,omitempty"` - // Optional. MIME type of the file as defined by sender - MimeType string `json:"mime_type,omitempty"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. - FileSize int64 `json:"file_size,omitempty"` - // Optional. Thumbnail of the album cover to which the music file belongs - Thumb *PhotoSize `json:"thumb,omitempty"` -} - -// BotCommand This object represents a bot command. -// https://core.telegram.org/bots/api#botcommand -type BotCommand struct { - // Text of the command; 1-32 characters. Can contain only lowercase English letters, digits and underscores. - Command string `json:"command"` - // Description of the command; 1-256 characters. - Description string `json:"description"` -} - -// BotCommandScope This object represents the scope to which bot commands are applied. Currently, the following 7 scopes are supported: -// - BotCommandScopeDefault -// - BotCommandScopeAllPrivateChats -// - BotCommandScopeAllGroupChats -// - BotCommandScopeAllChatAdministrators -// - BotCommandScopeChat -// - BotCommandScopeChatAdministrators -// - BotCommandScopeChatMember -// https://core.telegram.org/bots/api#botcommandscope -type BotCommandScope interface { - GetType() string - botCommandScope() - // MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with complex telegram types in a non-generic world. - MergeBotCommandScope() MergedBotCommandScope -} - -// MergedBotCommandScope is a helper type to simplify interactions with the various BotCommandScope subtypes. -type MergedBotCommandScope struct { - // Scope type, must be default - Type string `json:"type"` - // Optional. Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) (Only for chat, chat_administrators, chat_member) - ChatId int64 `json:"chat_id,omitempty"` - // Optional. Unique identifier of the target user (Only for chat_member) - UserId int64 `json:"user_id,omitempty"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MergedBotCommandScope) GetType() string { - return v.Type -} - -// MergedBotCommandScope.botCommandScope is a dummy method to avoid interface implementation. -func (v MergedBotCommandScope) botCommandScope() {} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v MergedBotCommandScope) MergeBotCommandScope() MergedBotCommandScope { - return v -} - -// BotCommandScopeAllChatAdministrators Represents the scope of bot commands, covering all group and supergroup chat administrators. -// https://core.telegram.org/bots/api#botcommandscopeallchatadministrators -type BotCommandScopeAllChatAdministrators struct{} - -// GetType is a helper method to easily access the common fields of an interface. 
-func (v BotCommandScopeAllChatAdministrators) GetType() string { - return "all_chat_administrators" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeAllChatAdministrators) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "all_chat_administrators", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeAllChatAdministrators) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeAllChatAdministrators - a := struct { - Type string `json:"type"` - alias - }{ - Type: "all_chat_administrators", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeAllChatAdministrators.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeAllChatAdministrators) botCommandScope() {} - -// BotCommandScopeAllGroupChats Represents the scope of bot commands, covering all group and supergroup chats. -// https://core.telegram.org/bots/api#botcommandscopeallgroupchats -type BotCommandScopeAllGroupChats struct{} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeAllGroupChats) GetType() string { - return "all_group_chats" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeAllGroupChats) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "all_group_chats", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeAllGroupChats) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeAllGroupChats - a := struct { - Type string `json:"type"` - alias - }{ - Type: "all_group_chats", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeAllGroupChats.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeAllGroupChats) botCommandScope() {} - -// BotCommandScopeAllPrivateChats Represents the scope of bot commands, covering all private chats. -// https://core.telegram.org/bots/api#botcommandscopeallprivatechats -type BotCommandScopeAllPrivateChats struct{} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeAllPrivateChats) GetType() string { - return "all_private_chats" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeAllPrivateChats) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "all_private_chats", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeAllPrivateChats) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeAllPrivateChats - a := struct { - Type string `json:"type"` - alias - }{ - Type: "all_private_chats", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeAllPrivateChats.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeAllPrivateChats) botCommandScope() {} - -// BotCommandScopeChat Represents the scope of bot commands, covering a specific chat. 
-// https://core.telegram.org/bots/api#botcommandscopechat -type BotCommandScopeChat struct { - // Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) - ChatId int64 `json:"chat_id"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeChat) GetType() string { - return "chat" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeChat) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "chat", - ChatId: v.ChatId, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeChat) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeChat - a := struct { - Type string `json:"type"` - alias - }{ - Type: "chat", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeChat.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeChat) botCommandScope() {} - -// BotCommandScopeChatAdministrators Represents the scope of bot commands, covering all administrators of a specific group or supergroup chat. -// https://core.telegram.org/bots/api#botcommandscopechatadministrators -type BotCommandScopeChatAdministrators struct { - // Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) - ChatId int64 `json:"chat_id"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeChatAdministrators) GetType() string { - return "chat_administrators" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeChatAdministrators) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "chat_administrators", - ChatId: v.ChatId, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeChatAdministrators) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeChatAdministrators - a := struct { - Type string `json:"type"` - alias - }{ - Type: "chat_administrators", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeChatAdministrators.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeChatAdministrators) botCommandScope() {} - -// BotCommandScopeChatMember Represents the scope of bot commands, covering a specific member of a group or supergroup chat. -// https://core.telegram.org/bots/api#botcommandscopechatmember -type BotCommandScopeChatMember struct { - // Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) - ChatId int64 `json:"chat_id"` - // Unique identifier of the target user - UserId int64 `json:"user_id"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeChatMember) GetType() string { - return "chat_member" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. 
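The MarshalJSON overrides on these scope types exist so that the "type" discriminator is always written, even for zero-value structs. A quick sketch of the resulting wire format and of passing a scope to SetMyCommands, assuming a *gotgbot.Bot value b and imports of encoding/json and fmt; the chat id is a placeholder:

    func adminOnlyCommands(b *gotgbot.Bot) error {
        scope := gotgbot.BotCommandScopeChat{ChatId: -1001234567890}
        bs, err := json.Marshal(scope)
        if err != nil {
            return err
        }
        fmt.Println(string(bs)) // {"type":"chat","chat_id":-1001234567890}
        _, err = b.SetMyCommands([]gotgbot.BotCommand{
            {Command: "ban", Description: "Restrict a member"},
        }, &gotgbot.SetMyCommandsOpts{Scope: scope})
        return err
    }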
-func (v BotCommandScopeChatMember) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "chat_member", - ChatId: v.ChatId, - UserId: v.UserId, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeChatMember) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeChatMember - a := struct { - Type string `json:"type"` - alias - }{ - Type: "chat_member", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeChatMember.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeChatMember) botCommandScope() {} - -// BotCommandScopeDefault Represents the default scope of bot commands. Default commands are used if no commands with a narrower scope are specified for the user. -// https://core.telegram.org/bots/api#botcommandscopedefault -type BotCommandScopeDefault struct{} - -// GetType is a helper method to easily access the common fields of an interface. -func (v BotCommandScopeDefault) GetType() string { - return "default" -} - -// MergeBotCommandScope returns a MergedBotCommandScope struct to simplify working with types in a non-generic world. -func (v BotCommandScopeDefault) MergeBotCommandScope() MergedBotCommandScope { - return MergedBotCommandScope{ - Type: "default", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v BotCommandScopeDefault) MarshalJSON() ([]byte, error) { - type alias BotCommandScopeDefault - a := struct { - Type string `json:"type"` - alias - }{ - Type: "default", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// BotCommandScopeDefault.botCommandScope is a dummy method to avoid interface implementation. -func (v BotCommandScopeDefault) botCommandScope() {} - -// CallbackGame A placeholder, currently holds no information. Use BotFather to set up your game. -// https://core.telegram.org/bots/api#callbackgame -type CallbackGame struct{} - -// CallbackQuery This object represents an incoming callback query from a callback button in an inline keyboard. If the button that originated the query was attached to a message sent by the bot, the field message will be present. If the button was attached to a message sent via the bot (in inline mode), the field inline_message_id will be present. Exactly one of the fields data or game_short_name will be present. -// https://core.telegram.org/bots/api#callbackquery -type CallbackQuery struct { - // Unique identifier for this query - Id string `json:"id"` - // Sender - From User `json:"from"` - // Optional. Message with the callback button that originated the query. Note that message content and message date will not be available if the message is too old - Message *Message `json:"message,omitempty"` - // Optional. Identifier of the message sent via the bot in inline mode, that originated the query. - InlineMessageId string `json:"inline_message_id,omitempty"` - // Global identifier, uniquely corresponding to the chat to which the message with the callback button was sent. Useful for high scores in games. - ChatInstance string `json:"chat_instance"` - // Optional. Data associated with the callback button. Be aware that the message originated the query can contain no callback buttons with this data. - Data string `json:"data,omitempty"` - // Optional. 
Short name of a Game to be returned, serves as the unique identifier for the game
-	GameShortName string `json:"game_short_name,omitempty"`
-}
-
-// Chat This object represents a chat.
-// https://core.telegram.org/bots/api#chat
-type Chat struct {
-	// Unique identifier for this chat. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this identifier.
-	Id int64 `json:"id"`
-	// Type of chat, can be either "private", "group", "supergroup" or "channel"
-	Type string `json:"type"`
-	// Optional. Title, for supergroups, channels and group chats
-	Title string `json:"title,omitempty"`
-	// Optional. Username, for private chats, supergroups and channels if available
-	Username string `json:"username,omitempty"`
-	// Optional. First name of the other party in a private chat
-	FirstName string `json:"first_name,omitempty"`
-	// Optional. Last name of the other party in a private chat
-	LastName string `json:"last_name,omitempty"`
-	// Optional. True, if the supergroup chat is a forum (has topics enabled)
-	IsForum bool `json:"is_forum,omitempty"`
-	// Optional. Chat photo. Returned only in getChat.
-	Photo *ChatPhoto `json:"photo,omitempty"`
-	// Optional. If non-empty, the list of all active chat usernames; for private chats, supergroups and channels. Returned only in getChat.
-	ActiveUsernames []string `json:"active_usernames,omitempty"`
-	// Optional. Custom emoji identifier of emoji status of the other party in a private chat. Returned only in getChat.
-	EmojiStatusCustomEmojiId string `json:"emoji_status_custom_emoji_id,omitempty"`
-	// Optional. Bio of the other party in a private chat. Returned only in getChat.
-	Bio string `json:"bio,omitempty"`
-	// Optional. True, if privacy settings of the other party in the private chat allow tg://user?id= links to be used only in chats with the user. Returned only in getChat.
-	HasPrivateForwards bool `json:"has_private_forwards,omitempty"`
-	// Optional. True, if the privacy settings of the other party restrict sending voice and video note messages in the private chat. Returned only in getChat.
-	HasRestrictedVoiceAndVideoMessages bool `json:"has_restricted_voice_and_video_messages,omitempty"`
-	// Optional. True, if users need to join the supergroup before they can send messages. Returned only in getChat.
-	JoinToSendMessages bool `json:"join_to_send_messages,omitempty"`
-	// Optional. True, if all users directly joining the supergroup need to be approved by supergroup administrators. Returned only in getChat.
-	JoinByRequest bool `json:"join_by_request,omitempty"`
-	// Optional. Description, for groups, supergroups and channel chats. Returned only in getChat.
-	Description string `json:"description,omitempty"`
-	// Optional. Primary invite link, for groups, supergroups and channel chats. Returned only in getChat.
-	InviteLink string `json:"invite_link,omitempty"`
-	// Optional. The most recent pinned message (by sending date). Returned only in getChat.
-	PinnedMessage *Message `json:"pinned_message,omitempty"`
-	// Optional. Default chat member permissions, for groups and supergroups. Returned only in getChat.
-	Permissions *ChatPermissions `json:"permissions,omitempty"`
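Many Chat fields, like can_set_sticker_set below, are only populated in getChat results, so a common pattern is to fetch the chat before acting on such a field. A sketch tying this to the SetChatStickerSet helper from earlier, assuming a *gotgbot.Bot value b, the fmt import, and the generated GetChat wrapper from this same file:

    func trySetGroupStickers(b *gotgbot.Bot, chatId int64, setName string) error {
        chat, err := b.GetChat(chatId, nil)
        if err != nil {
            return err
        }
        if !chat.CanSetStickerSet { // only ever set in getChat responses
            return fmt.Errorf("bot may not change the sticker set of chat %d", chatId)
        }
        _, err = b.SetChatStickerSet(chatId, setName, nil)
        return err
    }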
- SlowModeDelay int64 `json:"slow_mode_delay,omitempty"` - // Optional. The time after which all messages sent to the chat will be automatically deleted; in seconds. Returned only in getChat. - MessageAutoDeleteTime int64 `json:"message_auto_delete_time,omitempty"` - // Optional. True, if messages from the chat can't be forwarded to other chats. Returned only in getChat. - HasProtectedContent bool `json:"has_protected_content,omitempty"` - // Optional. For supergroups, name of group sticker set. Returned only in getChat. - StickerSetName string `json:"sticker_set_name,omitempty"` - // Optional. True, if the bot can change the group sticker set. Returned only in getChat. - CanSetStickerSet bool `json:"can_set_sticker_set,omitempty"` - // Optional. Unique identifier for the linked chat, i.e. the discussion group identifier for a channel and vice versa; for supergroups and channel chats. This identifier may be greater than 32 bits and some programming languages may have difficulty/silent defects in interpreting it. But it is smaller than 52 bits, so a signed 64 bit integer or double-precision float type are safe for storing this identifier. Returned only in getChat. - LinkedChatId int64 `json:"linked_chat_id,omitempty"` - // Optional. For supergroups, the location to which the supergroup is connected. Returned only in getChat. - Location *ChatLocation `json:"location,omitempty"` -} - -// ChatAdministratorRights Represents the rights of an administrator in a chat. -// https://core.telegram.org/bots/api#chatadministratorrights -type ChatAdministratorRights struct { - // True, if the user's presence in the chat is hidden - IsAnonymous bool `json:"is_anonymous"` - // True, if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. Implied by any other administrator privilege - CanManageChat bool `json:"can_manage_chat"` - // True, if the administrator can delete messages of other users - CanDeleteMessages bool `json:"can_delete_messages"` - // True, if the administrator can manage video chats - CanManageVideoChats bool `json:"can_manage_video_chats"` - // True, if the administrator can restrict, ban or unban chat members - CanRestrictMembers bool `json:"can_restrict_members"` - // True, if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by the user) - CanPromoteMembers bool `json:"can_promote_members"` - // True, if the user is allowed to change the chat title, photo and other settings - CanChangeInfo bool `json:"can_change_info"` - // True, if the user is allowed to invite new users to the chat - CanInviteUsers bool `json:"can_invite_users"` - // Optional. True, if the administrator can post in the channel; channels only - CanPostMessages bool `json:"can_post_messages,omitempty"` - // Optional. True, if the administrator can edit messages of other users and can pin messages; channels only - CanEditMessages bool `json:"can_edit_messages,omitempty"` - // Optional. True, if the user is allowed to pin messages; groups and supergroups only - CanPinMessages bool `json:"can_pin_messages,omitempty"` - // Optional. 
True, if the user is allowed to create, rename, close, and reopen forum topics; supergroups only - CanManageTopics bool `json:"can_manage_topics,omitempty"` -} - -// ChatInviteLink Represents an invite link for a chat. -// https://core.telegram.org/bots/api#chatinvitelink -type ChatInviteLink struct { - // The invite link. If the link was created by another chat administrator, then the second part of the link will be replaced with "...". - InviteLink string `json:"invite_link"` - // Creator of the link - Creator User `json:"creator"` - // True, if users joining the chat via the link need to be approved by chat administrators - CreatesJoinRequest bool `json:"creates_join_request"` - // True, if the link is primary - IsPrimary bool `json:"is_primary"` - // True, if the link is revoked - IsRevoked bool `json:"is_revoked"` - // Optional. Invite link name - Name string `json:"name,omitempty"` - // Optional. Point in time (Unix timestamp) when the link will expire or has expired - ExpireDate int64 `json:"expire_date,omitempty"` - // Optional. The maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999 - MemberLimit int64 `json:"member_limit,omitempty"` - // Optional. Number of pending join requests created using this link - PendingJoinRequestCount int64 `json:"pending_join_request_count,omitempty"` -} - -// ChatJoinRequest Represents a join request sent to a chat. -// https://core.telegram.org/bots/api#chatjoinrequest -type ChatJoinRequest struct { - // Chat to which the request was sent - Chat Chat `json:"chat"` - // User that sent the join request - From User `json:"from"` - // Date the request was sent in Unix time - Date int64 `json:"date"` - // Optional. Bio of the user. - Bio string `json:"bio,omitempty"` - // Optional. Chat invite link that was used by the user to send the join request - InviteLink *ChatInviteLink `json:"invite_link,omitempty"` -} - -// ChatLocation Represents a location to which a chat is connected. -// https://core.telegram.org/bots/api#chatlocation -type ChatLocation struct { - // The location to which the supergroup is connected. Can't be a live location. - Location Location `json:"location"` - // Location address; 1-64 characters, as defined by the chat owner - Address string `json:"address"` -} - -// ChatMember This object contains information about one member of a chat. Currently, the following 6 types of chat members are supported: -// - ChatMemberOwner -// - ChatMemberAdministrator -// - ChatMemberMember -// - ChatMemberRestricted -// - ChatMemberLeft -// - ChatMemberBanned -// https://core.telegram.org/bots/api#chatmember -type ChatMember interface { - GetStatus() string - GetUser() User - chatMember() - // MergeChatMember returns a MergedChatMember struct to simplify working with complex telegram types in a non-generic world. - MergeChatMember() MergedChatMember -} - -// MergedChatMember is a helper type to simplify interactions with the various ChatMember subtypes. -type MergedChatMember struct { - // The member's status in the chat - Status string `json:"status"` - // Information about the user - User User `json:"user"` - // Optional. True, if the user's presence in the chat is hidden (Only for creator, administrator) - IsAnonymous bool `json:"is_anonymous,omitempty"` - // Optional. Custom title for this user (Only for creator, administrator) - CustomTitle string `json:"custom_title,omitempty"` - // Optional. 
True, if the bot is allowed to edit administrator privileges of that user (Only for administrator) - CanBeEdited bool `json:"can_be_edited,omitempty"` - // Optional. True, if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. Implied by any other administrator privilege (Only for administrator) - CanManageChat bool `json:"can_manage_chat,omitempty"` - // Optional. True, if the administrator can delete messages of other users (Only for administrator) - CanDeleteMessages bool `json:"can_delete_messages,omitempty"` - // Optional. True, if the administrator can manage video chats (Only for administrator) - CanManageVideoChats bool `json:"can_manage_video_chats,omitempty"` - // Optional. True, if the administrator can restrict, ban or unban chat members (Only for administrator) - CanRestrictMembers bool `json:"can_restrict_members,omitempty"` - // Optional. True, if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by the user) (Only for administrator) - CanPromoteMembers bool `json:"can_promote_members,omitempty"` - // Optional. True, if the user is allowed to change the chat title, photo and other settings (Only for administrator, restricted) - CanChangeInfo bool `json:"can_change_info,omitempty"` - // Optional. True, if the user is allowed to invite new users to the chat (Only for administrator, restricted) - CanInviteUsers bool `json:"can_invite_users,omitempty"` - // Optional. True, if the administrator can post in the channel; channels only (Only for administrator) - CanPostMessages bool `json:"can_post_messages,omitempty"` - // Optional. True, if the administrator can edit messages of other users and can pin messages; channels only (Only for administrator) - CanEditMessages bool `json:"can_edit_messages,omitempty"` - // Optional. True, if the user is allowed to pin messages; groups and supergroups only (Only for administrator, restricted) - CanPinMessages bool `json:"can_pin_messages,omitempty"` - // Optional. True, if the user is allowed to create, rename, close, and reopen forum topics; supergroups only (Only for administrator, restricted) - CanManageTopics bool `json:"can_manage_topics,omitempty"` - // Optional. True, if the user is a member of the chat at the moment of the request (Only for restricted) - IsMember bool `json:"is_member,omitempty"` - // Optional. True, if the user is allowed to send text messages, contacts, locations and venues (Only for restricted) - CanSendMessages bool `json:"can_send_messages,omitempty"` - // Optional. True, if the user is allowed to send audios, documents, photos, videos, video notes and voice notes (Only for restricted) - CanSendMediaMessages bool `json:"can_send_media_messages,omitempty"` - // Optional. True, if the user is allowed to send polls (Only for restricted) - CanSendPolls bool `json:"can_send_polls,omitempty"` - // Optional. True, if the user is allowed to send animations, games, stickers and use inline bots (Only for restricted) - CanSendOtherMessages bool `json:"can_send_other_messages,omitempty"` - // Optional. True, if the user is allowed to add web page previews to their messages (Only for restricted) - CanAddWebPagePreviews bool `json:"can_add_web_page_previews,omitempty"` - // Optional. Date when restrictions will be lifted for this user; unix time. 
If 0, then the user is restricted forever (Only for restricted, kicked) - UntilDate int64 `json:"until_date,omitempty"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v MergedChatMember) GetStatus() string { - return v.Status -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v MergedChatMember) GetUser() User { - return v.User -} - -// MergedChatMember.chatMember is a dummy method to avoid interface implementation. -func (v MergedChatMember) chatMember() {} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v MergedChatMember) MergeChatMember() MergedChatMember { - return v -} - -// unmarshalChatMemberArray is a JSON unmarshalling helper which allows unmarshalling an array of interfaces -// using unmarshalChatMember. -func unmarshalChatMemberArray(d json.RawMessage) ([]ChatMember, error) { - var ds []json.RawMessage - err := json.Unmarshal(d, &ds) - if err != nil { - return nil, err - } - - var vs []ChatMember - for _, d := range ds { - v, err := unmarshalChatMember(d) - if err != nil { - return nil, err - } - vs = append(vs, v) - } - - return vs, nil -} - -// unmarshalChatMember is a JSON unmarshal helper to marshal the right structs into a ChatMember interface -// based on the Status field. -func unmarshalChatMember(d json.RawMessage) (ChatMember, error) { - if len(d) == 0 { - return nil, nil - } - - t := struct { - Status string - }{} - err := json.Unmarshal(d, &t) - if err != nil { - return nil, err - } - - switch t.Status { - case "creator": - s := ChatMemberOwner{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "administrator": - s := ChatMemberAdministrator{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "member": - s := ChatMemberMember{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "restricted": - s := ChatMemberRestricted{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "left": - s := ChatMemberLeft{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "kicked": - s := ChatMemberBanned{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - } - return nil, fmt.Errorf("unknown interface with Status %v", t.Status) -} - -// ChatMemberAdministrator Represents a chat member that has some additional privileges. -// https://core.telegram.org/bots/api#chatmemberadministrator -type ChatMemberAdministrator struct { - // Information about the user - User User `json:"user"` - // True, if the bot is allowed to edit administrator privileges of that user - CanBeEdited bool `json:"can_be_edited"` - // True, if the user's presence in the chat is hidden - IsAnonymous bool `json:"is_anonymous"` - // True, if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. 
Implied by any other administrator privilege - CanManageChat bool `json:"can_manage_chat"` - // True, if the administrator can delete messages of other users - CanDeleteMessages bool `json:"can_delete_messages"` - // True, if the administrator can manage video chats - CanManageVideoChats bool `json:"can_manage_video_chats"` - // True, if the administrator can restrict, ban or unban chat members - CanRestrictMembers bool `json:"can_restrict_members"` - // True, if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by the user) - CanPromoteMembers bool `json:"can_promote_members"` - // True, if the user is allowed to change the chat title, photo and other settings - CanChangeInfo bool `json:"can_change_info"` - // True, if the user is allowed to invite new users to the chat - CanInviteUsers bool `json:"can_invite_users"` - // Optional. True, if the administrator can post in the channel; channels only - CanPostMessages bool `json:"can_post_messages,omitempty"` - // Optional. True, if the administrator can edit messages of other users and can pin messages; channels only - CanEditMessages bool `json:"can_edit_messages,omitempty"` - // Optional. True, if the user is allowed to pin messages; groups and supergroups only - CanPinMessages bool `json:"can_pin_messages,omitempty"` - // Optional. True, if the user is allowed to create, rename, close, and reopen forum topics; supergroups only - CanManageTopics bool `json:"can_manage_topics,omitempty"` - // Optional. Custom title for this user - CustomTitle string `json:"custom_title,omitempty"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberAdministrator) GetStatus() string { - return "administrator" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberAdministrator) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberAdministrator) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "administrator", - User: v.User, - CanBeEdited: v.CanBeEdited, - IsAnonymous: v.IsAnonymous, - CanManageChat: v.CanManageChat, - CanDeleteMessages: v.CanDeleteMessages, - CanManageVideoChats: v.CanManageVideoChats, - CanRestrictMembers: v.CanRestrictMembers, - CanPromoteMembers: v.CanPromoteMembers, - CanChangeInfo: v.CanChangeInfo, - CanInviteUsers: v.CanInviteUsers, - CanPostMessages: v.CanPostMessages, - CanEditMessages: v.CanEditMessages, - CanPinMessages: v.CanPinMessages, - CanManageTopics: v.CanManageTopics, - CustomTitle: v.CustomTitle, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. -func (v ChatMemberAdministrator) MarshalJSON() ([]byte, error) { - type alias ChatMemberAdministrator - a := struct { - Status string `json:"status"` - alias - }{ - Status: "administrator", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberAdministrator.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberAdministrator) chatMember() {} - -// ChatMemberBanned Represents a chat member that was banned in the chat and can't return to the chat or view chat messages. 
-// https://core.telegram.org/bots/api#chatmemberbanned -type ChatMemberBanned struct { - // Information about the user - User User `json:"user"` - // Date when restrictions will be lifted for this user; unix time. If 0, then the user is banned forever - UntilDate int64 `json:"until_date"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberBanned) GetStatus() string { - return "kicked" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberBanned) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberBanned) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "kicked", - User: v.User, - UntilDate: v.UntilDate, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. -func (v ChatMemberBanned) MarshalJSON() ([]byte, error) { - type alias ChatMemberBanned - a := struct { - Status string `json:"status"` - alias - }{ - Status: "kicked", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberBanned.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberBanned) chatMember() {} - -// ChatMemberLeft Represents a chat member that isn't currently a member of the chat, but may join it themselves. -// https://core.telegram.org/bots/api#chatmemberleft -type ChatMemberLeft struct { - // Information about the user - User User `json:"user"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberLeft) GetStatus() string { - return "left" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberLeft) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberLeft) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "left", - User: v.User, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. -func (v ChatMemberLeft) MarshalJSON() ([]byte, error) { - type alias ChatMemberLeft - a := struct { - Status string `json:"status"` - alias - }{ - Status: "left", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberLeft.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberLeft) chatMember() {} - -// ChatMemberMember Represents a chat member that has no additional privileges or restrictions. -// https://core.telegram.org/bots/api#chatmembermember -type ChatMemberMember struct { - // Information about the user - User User `json:"user"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberMember) GetStatus() string { - return "member" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberMember) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberMember) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "member", - User: v.User, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. 
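// Editorial aside: a minimal illustrative sketch, not part of the generated file. Every concrete ChatMember subtype hard-codes its status via GetStatus, and unmarshalChatMember dispatches on the raw "status" field, so a getChatMember response can be decoded without knowing the concrete type up front. Assuming this package, and User's usual id/is_bot/first_name JSON fields:
//
//	raw := json.RawMessage(`{"status":"kicked","user":{"id":42,"is_bot":false,"first_name":"Ann"},"until_date":0}`)
//	member, err := unmarshalChatMember(raw)
//	if err != nil {
//		// handle the error
//	}
//	if banned, ok := member.(ChatMemberBanned); ok {
//		fmt.Println(banned.GetStatus(), banned.UntilDate) // "kicked" 0, i.e. banned forever
//	}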
-func (v ChatMemberMember) MarshalJSON() ([]byte, error) { - type alias ChatMemberMember - a := struct { - Status string `json:"status"` - alias - }{ - Status: "member", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberMember.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberMember) chatMember() {} - -// ChatMemberOwner Represents a chat member that owns the chat and has all administrator privileges. -// https://core.telegram.org/bots/api#chatmemberowner -type ChatMemberOwner struct { - // Information about the user - User User `json:"user"` - // True, if the user's presence in the chat is hidden - IsAnonymous bool `json:"is_anonymous"` - // Optional. Custom title for this user - CustomTitle string `json:"custom_title,omitempty"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberOwner) GetStatus() string { - return "creator" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberOwner) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberOwner) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "creator", - User: v.User, - IsAnonymous: v.IsAnonymous, - CustomTitle: v.CustomTitle, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. -func (v ChatMemberOwner) MarshalJSON() ([]byte, error) { - type alias ChatMemberOwner - a := struct { - Status string `json:"status"` - alias - }{ - Status: "creator", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberOwner.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberOwner) chatMember() {} - -// ChatMemberRestricted Represents a chat member that is under certain restrictions in the chat. Supergroups only. -// https://core.telegram.org/bots/api#chatmemberrestricted -type ChatMemberRestricted struct { - // Information about the user - User User `json:"user"` - // True, if the user is a member of the chat at the moment of the request - IsMember bool `json:"is_member"` - // True, if the user is allowed to change the chat title, photo and other settings - CanChangeInfo bool `json:"can_change_info"` - // True, if the user is allowed to invite new users to the chat - CanInviteUsers bool `json:"can_invite_users"` - // True, if the user is allowed to pin messages - CanPinMessages bool `json:"can_pin_messages"` - // True, if the user is allowed to create forum topics - CanManageTopics bool `json:"can_manage_topics"` - // True, if the user is allowed to send text messages, contacts, locations and venues - CanSendMessages bool `json:"can_send_messages"` - // True, if the user is allowed to send audios, documents, photos, videos, video notes and voice notes - CanSendMediaMessages bool `json:"can_send_media_messages"` - // True, if the user is allowed to send polls - CanSendPolls bool `json:"can_send_polls"` - // True, if the user is allowed to send animations, games, stickers and use inline bots - CanSendOtherMessages bool `json:"can_send_other_messages"` - // True, if the user is allowed to add web page previews to their messages - CanAddWebPagePreviews bool `json:"can_add_web_page_previews"` - // Date when restrictions will be lifted for this user; unix time. 
If 0, then the user is restricted forever - UntilDate int64 `json:"until_date"` -} - -// GetStatus is a helper method to easily access the common fields of an interface. -func (v ChatMemberRestricted) GetStatus() string { - return "restricted" -} - -// GetUser is a helper method to easily access the common fields of an interface. -func (v ChatMemberRestricted) GetUser() User { - return v.User -} - -// MergeChatMember returns a MergedChatMember struct to simplify working with types in a non-generic world. -func (v ChatMemberRestricted) MergeChatMember() MergedChatMember { - return MergedChatMember{ - Status: "restricted", - User: v.User, - IsMember: v.IsMember, - CanChangeInfo: v.CanChangeInfo, - CanInviteUsers: v.CanInviteUsers, - CanPinMessages: v.CanPinMessages, - CanManageTopics: v.CanManageTopics, - CanSendMessages: v.CanSendMessages, - CanSendMediaMessages: v.CanSendMediaMessages, - CanSendPolls: v.CanSendPolls, - CanSendOtherMessages: v.CanSendOtherMessages, - CanAddWebPagePreviews: v.CanAddWebPagePreviews, - UntilDate: v.UntilDate, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Status value. -func (v ChatMemberRestricted) MarshalJSON() ([]byte, error) { - type alias ChatMemberRestricted - a := struct { - Status string `json:"status"` - alias - }{ - Status: "restricted", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// ChatMemberRestricted.chatMember is a dummy method to avoid interface implementation. -func (v ChatMemberRestricted) chatMember() {} - -// ChatMemberUpdated This object represents changes in the status of a chat member. -// https://core.telegram.org/bots/api#chatmemberupdated -type ChatMemberUpdated struct { - // Chat the user belongs to - Chat Chat `json:"chat"` - // Performer of the action, which resulted in the change - From User `json:"from"` - // Date the change was done in Unix time - Date int64 `json:"date"` - // Previous information about the chat member - OldChatMember ChatMember `json:"old_chat_member"` - // New information about the chat member - NewChatMember ChatMember `json:"new_chat_member"` - // Optional. Chat invite link, which was used by the user to join the chat; for joining by invite link events only. - InviteLink *ChatInviteLink `json:"invite_link,omitempty"` -} - -// UnmarshalJSON is a custom JSON unmarshaller to use the helpers which allow for unmarshalling structs into interfaces. -func (v *ChatMemberUpdated) UnmarshalJSON(b []byte) error { - // All fields in ChatMemberUpdated, with interface fields as json.RawMessage - type tmp struct { - Chat Chat `json:"chat"` - From User `json:"from"` - Date int64 `json:"date"` - OldChatMember json.RawMessage `json:"old_chat_member"` - NewChatMember json.RawMessage `json:"new_chat_member"` - InviteLink *ChatInviteLink `json:"invite_link"` - } - t := tmp{} - err := json.Unmarshal(b, &t) - if err != nil { - return err - } - - v.Chat = t.Chat - v.From = t.From - v.Date = t.Date - v.OldChatMember, err = unmarshalChatMember(t.OldChatMember) - if err != nil { - return err - } - v.NewChatMember, err = unmarshalChatMember(t.NewChatMember) - if err != nil { - return err - } - v.InviteLink = t.InviteLink - - return nil -} - -// ChatPermissions Describes actions that a non-administrator user is allowed to take in a chat. -// https://core.telegram.org/bots/api#chatpermissions -type ChatPermissions struct { - // Optional. True, if the user is allowed to send text messages, contacts, locations and venues - CanSendMessages bool `json:"can_send_messages,omitempty"` - // Optional. 
True, if the user is allowed to send audios, documents, photos, videos, video notes and voice notes, implies can_send_messages - CanSendMediaMessages bool `json:"can_send_media_messages,omitempty"` - // Optional. True, if the user is allowed to send polls, implies can_send_messages - CanSendPolls bool `json:"can_send_polls,omitempty"` - // Optional. True, if the user is allowed to send animations, games, stickers and use inline bots, implies can_send_media_messages - CanSendOtherMessages bool `json:"can_send_other_messages,omitempty"` - // Optional. True, if the user is allowed to add web page previews to their messages, implies can_send_media_messages - CanAddWebPagePreviews bool `json:"can_add_web_page_previews,omitempty"` - // Optional. True, if the user is allowed to change the chat title, photo and other settings. Ignored in public supergroups - CanChangeInfo bool `json:"can_change_info,omitempty"` - // Optional. True, if the user is allowed to invite new users to the chat - CanInviteUsers bool `json:"can_invite_users,omitempty"` - // Optional. True, if the user is allowed to pin messages. Ignored in public supergroups - CanPinMessages bool `json:"can_pin_messages,omitempty"` - // Optional. True, if the user is allowed to create forum topics. If omitted defaults to the value of can_pin_messages - CanManageTopics bool `json:"can_manage_topics,omitempty"` -} - -// ChatPhoto This object represents a chat photo. -// https://core.telegram.org/bots/api#chatphoto -type ChatPhoto struct { - // File identifier of small (160x160) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed. - SmallFileId string `json:"small_file_id"` - // Unique file identifier of small (160x160) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - SmallFileUniqueId string `json:"small_file_unique_id"` - // File identifier of big (640x640) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed. - BigFileId string `json:"big_file_id"` - // Unique file identifier of big (640x640) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - BigFileUniqueId string `json:"big_file_unique_id"` -} - -// ChosenInlineResult Represents a result of an inline query that was chosen by the user and sent to their chat partner. -// Note: It is necessary to enable inline feedback via @BotFather in order to receive these objects in updates. -// https://core.telegram.org/bots/api#choseninlineresult -type ChosenInlineResult struct { - // The unique identifier for the result that was chosen - ResultId string `json:"result_id"` - // The user that chose the result - From User `json:"from"` - // Optional. Sender location, only for bots that require user location - Location *Location `json:"location,omitempty"` - // Optional. Identifier of the sent inline message. Available only if there is an inline keyboard attached to the message. Will be also received in callback queries and can be used to edit the message. - InlineMessageId string `json:"inline_message_id,omitempty"` - // The query that was used to obtain the result - Query string `json:"query"` -} - -// Contact This object represents a phone contact. 
-// https://core.telegram.org/bots/api#contact -type Contact struct { - // Contact's phone number - PhoneNumber string `json:"phone_number"` - // Contact's first name - FirstName string `json:"first_name"` - // Optional. Contact's last name - LastName string `json:"last_name,omitempty"` - // Optional. Contact's user identifier in Telegram. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a 64-bit integer or double-precision float type are safe for storing this identifier. - UserId int64 `json:"user_id,omitempty"` - // Optional. Additional data about the contact in the form of a vCard - Vcard string `json:"vcard,omitempty"` -} - -// Dice This object represents an animated emoji that displays a random value. -// https://core.telegram.org/bots/api#dice -type Dice struct { - // Emoji on which the dice throw animation is based - Emoji string `json:"emoji"` - // Value of the dice, 1-6 for "🎲", "🎯" and "🎳" base emoji, 1-5 for "🏀" and "⚽" base emoji, 1-64 for "🎰" base emoji - Value int64 `json:"value"` -} - -// Document This object represents a general file (as opposed to photos, voice messages and audio files). -// https://core.telegram.org/bots/api#document -type Document struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Optional. Document thumbnail as defined by sender - Thumb *PhotoSize `json:"thumb,omitempty"` - // Optional. Original filename as defined by sender - FileName string `json:"file_name,omitempty"` - // Optional. MIME type of the file as defined by sender - MimeType string `json:"mime_type,omitempty"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. - FileSize int64 `json:"file_size,omitempty"` -} - -// EncryptedCredentials Describes data required for decrypting and authenticating EncryptedPassportElement. See the Telegram Passport Documentation for a complete description of the data decryption and authentication processes. -// https://core.telegram.org/bots/api#encryptedcredentials -type EncryptedCredentials struct { - // Base64-encoded encrypted JSON-serialized data with unique user's payload, data hashes and secrets required for EncryptedPassportElement decryption and authentication - Data string `json:"data"` - // Base64-encoded data hash for data authentication - Hash string `json:"hash"` - // Base64-encoded secret, encrypted with the bot's public RSA key, required for data decryption - Secret string `json:"secret"` -} - -// EncryptedPassportElement Describes documents or other Telegram Passport elements shared with the bot by the user. -// https://core.telegram.org/bots/api#encryptedpassportelement -type EncryptedPassportElement struct { - // Element type. One of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration", "phone_number", "email". - Type string `json:"type"` - // Optional. 
Base64-encoded encrypted Telegram Passport element data provided by the user, available for "personal_details", "passport", "driver_license", "identity_card", "internal_passport" and "address" types. Can be decrypted and verified using the accompanying EncryptedCredentials. - Data string `json:"data,omitempty"` - // Optional. User's verified phone number, available only for "phone_number" type - PhoneNumber string `json:"phone_number,omitempty"` - // Optional. User's verified email address, available only for "email" type - Email string `json:"email,omitempty"` - // Optional. Array of encrypted files with documents provided by the user, available for "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration" types. Files can be decrypted and verified using the accompanying EncryptedCredentials. - Files []PassportFile `json:"files,omitempty"` - // Optional. Encrypted file with the front side of the document, provided by the user. Available for "passport", "driver_license", "identity_card" and "internal_passport". The file can be decrypted and verified using the accompanying EncryptedCredentials. - FrontSide *PassportFile `json:"front_side,omitempty"` - // Optional. Encrypted file with the reverse side of the document, provided by the user. Available for "driver_license" and "identity_card". The file can be decrypted and verified using the accompanying EncryptedCredentials. - ReverseSide *PassportFile `json:"reverse_side,omitempty"` - // Optional. Encrypted file with the selfie of the user holding a document, provided by the user; available for "passport", "driver_license", "identity_card" and "internal_passport". The file can be decrypted and verified using the accompanying EncryptedCredentials. - Selfie *PassportFile `json:"selfie,omitempty"` - // Optional. Array of encrypted files with translated versions of documents provided by the user. Available if requested for "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration" types. Files can be decrypted and verified using the accompanying EncryptedCredentials. - Translation []PassportFile `json:"translation,omitempty"` - // Base64-encoded element hash for use in PassportElementErrorUnspecified - Hash string `json:"hash"` -} - -// File This object represents a file ready to be downloaded. The file can be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile. -// https://core.telegram.org/bots/api#file -type File struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. - FileSize int64 `json:"file_size,omitempty"` - // Optional. File path. Use https://api.telegram.org/file/bot<token>/<file_path> to get the file. 
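// Editorial aside: a minimal illustrative sketch, not part of the generated file. As the File comments above describe, the returned file_path is combined with the bot token to form the download URL. The helper name buildFileURL is hypothetical, not an API of this library:
//
//	func buildFileURL(token string, f File) string {
//		// token and f.FilePath fill the bot<token>/<file_path> placeholders.
//		return fmt.Sprintf("https://api.telegram.org/file/bot%s/%s", token, f.FilePath)
//	}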
- FilePath string `json:"file_path,omitempty"` -} - -// ForceReply Upon receiving a message with this object, Telegram clients will display a reply interface to the user (act as if the user has selected the bot's message and tapped 'Reply'). This can be extremely useful if you want to create user-friendly step-by-step interfaces without having to sacrifice privacy mode. -// https://core.telegram.org/bots/api#forcereply -type ForceReply struct { - // Shows reply interface to the user, as if they manually selected the bot's message and tapped 'Reply' - ForceReply bool `json:"force_reply"` - // Optional. The placeholder to be shown in the input field when the reply is active; 1-64 characters - InputFieldPlaceholder string `json:"input_field_placeholder,omitempty"` - // Optional. Use this parameter if you want to force reply from specific users only. Targets: 1) users that are @mentioned in the text of the Message object; 2) if the bot's message is a reply (has reply_to_message_id), sender of the original message. - Selective bool `json:"selective,omitempty"` -} - -// ForceReply.replyMarkup is a dummy method to avoid interface implementation. -func (v ForceReply) replyMarkup() {} - -// ForumTopic This object represents a forum topic. -// https://core.telegram.org/bots/api#forumtopic -type ForumTopic struct { - // Unique identifier of the forum topic - MessageThreadId int64 `json:"message_thread_id"` - // Name of the topic - Name string `json:"name"` - // Color of the topic icon in RGB format - IconColor int64 `json:"icon_color"` - // Optional. Unique identifier of the custom emoji shown as the topic icon - IconCustomEmojiId string `json:"icon_custom_emoji_id,omitempty"` -} - -// ForumTopicClosed This object represents a service message about a forum topic closed in the chat. Currently holds no information. -// https://core.telegram.org/bots/api#forumtopicclosed -type ForumTopicClosed struct{} - -// ForumTopicCreated This object represents a service message about a new forum topic created in the chat. -// https://core.telegram.org/bots/api#forumtopiccreated -type ForumTopicCreated struct { - // Name of the topic - Name string `json:"name"` - // Color of the topic icon in RGB format - IconColor int64 `json:"icon_color"` - // Optional. Unique identifier of the custom emoji shown as the topic icon - IconCustomEmojiId string `json:"icon_custom_emoji_id,omitempty"` -} - -// ForumTopicReopened This object represents a service message about a forum topic reopened in the chat. Currently holds no information. -// https://core.telegram.org/bots/api#forumtopicreopened -type ForumTopicReopened struct{} - -// Game This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers. -// https://core.telegram.org/bots/api#game -type Game struct { - // Title of the game - Title string `json:"title"` - // Description of the game - Description string `json:"description"` - // Photo that will be displayed in the game message in chats. - Photo []PhotoSize `json:"photo,omitempty"` - // Optional. Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters. - Text string `json:"text,omitempty"` - // Optional. Special entities that appear in text, such as usernames, URLs, bot commands, etc. - TextEntities []MessageEntity `json:"text_entities,omitempty"` - // Optional. 
Animation that will be displayed in the game message in chats. Upload via BotFather - Animation *Animation `json:"animation,omitempty"` -} - -// GameHighScore This object represents one row of the high scores table for a game. -// https://core.telegram.org/bots/api#gamehighscore -type GameHighScore struct { - // Position in high score table for the game - Position int64 `json:"position"` - // User - User User `json:"user"` - // Score - Score int64 `json:"score"` -} - -// InlineKeyboardButton This object represents one button of an inline keyboard. You must use exactly one of the optional fields. -// https://core.telegram.org/bots/api#inlinekeyboardbutton -type InlineKeyboardButton struct { - // Label text on the button - Text string `json:"text"` - // Optional. HTTP or tg:// URL to be opened when the button is pressed. Links tg://user?id=<user_id> can be used to mention a user by their ID without using a username, if this is allowed by their privacy settings. - Url string `json:"url,omitempty"` - // Optional. Data to be sent in a callback query to the bot when button is pressed, 1-64 bytes - CallbackData string `json:"callback_data,omitempty"` - // Optional. Description of the Web App that will be launched when the user presses the button. The Web App will be able to send an arbitrary message on behalf of the user using the method answerWebAppQuery. Available only in private chats between a user and the bot. - WebApp *WebAppInfo `json:"web_app,omitempty"` - // Optional. An HTTPS URL used to automatically authorize the user. Can be used as a replacement for the Telegram Login Widget. - LoginUrl *LoginUrl `json:"login_url,omitempty"` - // Optional. If set, pressing the button will prompt the user to select one of their chats, open that chat and insert the bot's username and the specified inline query in the input field. May be empty, in which case just the bot's username will be inserted. Note: This offers an easy way for users to start using your bot in inline mode when they are currently in a private chat with it. Especially useful when combined with switch_pm... actions - in this case the user will be automatically returned to the chat they switched from, skipping the chat selection screen. - SwitchInlineQuery *string `json:"switch_inline_query,omitempty"` - // Optional. If set, pressing the button will insert the bot's username and the specified inline query in the current chat's input field. May be empty, in which case only the bot's username will be inserted. This offers a quick way for the user to open your bot in inline mode in the same chat - good for selecting something from multiple options. - SwitchInlineQueryCurrentChat *string `json:"switch_inline_query_current_chat,omitempty"` - // Optional. Description of the game that will be launched when the user presses the button. NOTE: This type of button must always be the first button in the first row. - CallbackGame *CallbackGame `json:"callback_game,omitempty"` - // Optional. Specify True, to send a Pay button. NOTE: This type of button must always be the first button in the first row and can only be used in invoice messages. - Pay bool `json:"pay,omitempty"` -} - -// InlineKeyboardMarkup This object represents an inline keyboard that appears right next to the message it belongs to. -// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will display an unsupported message. 
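// Editorial aside: a minimal illustrative sketch, not part of the generated file. Since an InlineKeyboardButton must use exactly one of its optional fields, a single row mixing a URL button and a callback button could be assembled like this (all values are placeholders):
//
//	markup := InlineKeyboardMarkup{
//		InlineKeyboard: [][]InlineKeyboardButton{{
//			{Text: "Open docs", Url: "https://core.telegram.org/bots/api"},
//			{Text: "Confirm", CallbackData: "confirm:1"},
//		}},
//	}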
-// https://core.telegram.org/bots/api#inlinekeyboardmarkup -type InlineKeyboardMarkup struct { - // Array of button rows, each represented by an Array of InlineKeyboardButton objects - InlineKeyboard [][]InlineKeyboardButton `json:"inline_keyboard,omitempty"` -} - -// InlineKeyboardMarkup.replyMarkup is a dummy method to avoid interface implementation. -func (v InlineKeyboardMarkup) replyMarkup() {} - -// InlineQuery This object represents an incoming inline query. When the user sends an empty query, your bot could return some default or trending results. -// https://core.telegram.org/bots/api#inlinequery -type InlineQuery struct { - // Unique identifier for this query - Id string `json:"id"` - // Sender - From User `json:"from"` - // Text of the query (up to 256 characters) - Query string `json:"query"` - // Offset of the results to be returned, can be controlled by the bot - Offset string `json:"offset"` - // Optional. Type of the chat from which the inline query was sent. Can be either "sender" for a private chat with the inline query sender, "private", "group", "supergroup", or "channel". The chat type should be always known for requests sent from official clients and most third-party clients, unless the request was sent from a secret chat - ChatType string `json:"chat_type,omitempty"` - // Optional. Sender location, only for bots that request user location - Location *Location `json:"location,omitempty"` -} - -// InlineQueryResult This object represents one result of an inline query. Telegram clients currently support results of the following 20 types: -// - InlineQueryResultCachedAudio -// - InlineQueryResultCachedDocument -// - InlineQueryResultCachedGif -// - InlineQueryResultCachedMpeg4Gif -// - InlineQueryResultCachedPhoto -// - InlineQueryResultCachedSticker -// - InlineQueryResultCachedVideo -// - InlineQueryResultCachedVoice -// - InlineQueryResultArticle -// - InlineQueryResultAudio -// - InlineQueryResultContact -// - InlineQueryResultGame -// - InlineQueryResultDocument -// - InlineQueryResultGif -// - InlineQueryResultLocation -// - InlineQueryResultMpeg4Gif -// - InlineQueryResultPhoto -// - InlineQueryResultVenue -// - InlineQueryResultVideo -// - InlineQueryResultVoice -// Note: All URLs passed in inline query results will be available to end users and therefore must be assumed to be public. -// https://core.telegram.org/bots/api#inlinequeryresult -type InlineQueryResult interface { - GetType() string - GetId() string - inlineQueryResult() - // MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with complex telegram types in a non-generic world. - MergeInlineQueryResult() MergedInlineQueryResult -} - -// MergedInlineQueryResult is a helper type to simplify interactions with the various InlineQueryResult subtypes. -type MergedInlineQueryResult struct { - // Type of the result, must be audio - Type string `json:"type"` - // Unique identifier for this result, 1-64 bytes - Id string `json:"id"` - // Optional. A valid file identifier for the audio file (Only for audio) - AudioFileId string `json:"audio_file_id,omitempty"` - // Optional. Caption, 0-1024 characters after entities parsing (Only for audio, document, gif, mpeg4_gif, photo, video, voice, audio, document, gif, mpeg4_gif, photo, video, voice) - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the audio caption. See formatting options for more details. 
(Only for audio, document, gif, mpeg4_gif, photo, video, voice, audio, document, gif, mpeg4_gif, photo, video, voice) - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode (Only for audio, document, gif, mpeg4_gif, photo, video, voice, audio, document, gif, mpeg4_gif, photo, video, voice) - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Inline keyboard attached to the message - ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"` - // Optional. Content of the message to be sent instead of the audio (Only for audio, document, gif, mpeg4_gif, photo, sticker, video, voice, article, audio, contact, document, gif, location, mpeg4_gif, photo, venue, video, voice) - InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"` - // Optional. Title for the result (Only for document, gif, mpeg4_gif, photo, video, voice, article, audio, document, gif, location, mpeg4_gif, photo, venue, video, voice) - Title string `json:"title,omitempty"` - // Optional. A valid file identifier for the file (Only for document) - DocumentFileId string `json:"document_file_id,omitempty"` - // Optional. Short description of the result (Only for document, photo, video, article, document, photo, video) - Description string `json:"description,omitempty"` - // Optional. A valid file identifier for the GIF file (Only for gif) - GifFileId string `json:"gif_file_id,omitempty"` - // Optional. A valid file identifier for the MPEG4 file (Only for mpeg4_gif) - Mpeg4FileId string `json:"mpeg4_file_id,omitempty"` - // Optional. A valid file identifier of the photo (Only for photo) - PhotoFileId string `json:"photo_file_id,omitempty"` - // Optional. A valid file identifier of the sticker (Only for sticker) - StickerFileId string `json:"sticker_file_id,omitempty"` - // Optional. A valid file identifier for the video file (Only for video) - VideoFileId string `json:"video_file_id,omitempty"` - // Optional. A valid file identifier for the voice message (Only for voice) - VoiceFileId string `json:"voice_file_id,omitempty"` - // Optional. URL of the result (Only for article) - Url string `json:"url,omitempty"` - // Optional. Pass True if you don't want the URL to be shown in the message (Only for article) - HideUrl bool `json:"hide_url,omitempty"` - // Optional. Url of the thumbnail for the result (Only for article, contact, document, gif, location, mpeg4_gif, photo, venue, video) - ThumbUrl string `json:"thumb_url,omitempty"` - // Optional. Thumbnail width (Only for article, contact, document, location, venue) - ThumbWidth int64 `json:"thumb_width,omitempty"` - // Optional. Thumbnail height (Only for article, contact, document, location, venue) - ThumbHeight int64 `json:"thumb_height,omitempty"` - // Optional. A valid URL for the audio file (Only for audio) - AudioUrl string `json:"audio_url,omitempty"` - // Optional. Performer (Only for audio) - Performer string `json:"performer,omitempty"` - // Optional. Audio duration in seconds (Only for audio) - AudioDuration int64 `json:"audio_duration,omitempty"` - // Optional. Contact's phone number (Only for contact) - PhoneNumber string `json:"phone_number,omitempty"` - // Optional. Contact's first name (Only for contact) - FirstName string `json:"first_name,omitempty"` - // Optional. Contact's last name (Only for contact) - LastName string `json:"last_name,omitempty"` - // Optional. 
Additional data about the contact in the form of a vCard, 0-2048 bytes (Only for contact) - Vcard string `json:"vcard,omitempty"` - // Optional. Short name of the game (Only for game) - GameShortName string `json:"game_short_name,omitempty"` - // Optional. A valid URL for the file (Only for document) - DocumentUrl string `json:"document_url,omitempty"` - // Optional. MIME type of the content of the file, either "application/pdf" or "application/zip" (Only for document, video) - MimeType string `json:"mime_type,omitempty"` - // Optional. A valid URL for the GIF file. File size must not exceed 1MB (Only for gif) - GifUrl string `json:"gif_url,omitempty"` - // Optional. Width of the GIF (Only for gif) - GifWidth int64 `json:"gif_width,omitempty"` - // Optional. Height of the GIF (Only for gif) - GifHeight int64 `json:"gif_height,omitempty"` - // Optional. Duration of the GIF in seconds (Only for gif) - GifDuration int64 `json:"gif_duration,omitempty"` - // Optional. MIME type of the thumbnail, must be one of "image/jpeg", "image/gif", or "video/mp4". Defaults to "image/jpeg" (Only for gif, mpeg4_gif) - ThumbMimeType string `json:"thumb_mime_type,omitempty"` - // Optional. Location latitude in degrees (Only for location, venue) - Latitude float64 `json:"latitude,omitempty"` - // Optional. Location longitude in degrees (Only for location, venue) - Longitude float64 `json:"longitude,omitempty"` - // Optional. The radius of uncertainty for the location, measured in meters; 0-1500 (Only for location) - HorizontalAccuracy float64 `json:"horizontal_accuracy,omitempty"` - // Optional. Period in seconds for which the location can be updated, should be between 60 and 86400. (Only for location) - LivePeriod int64 `json:"live_period,omitempty"` - // Optional. For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. (Only for location) - Heading int64 `json:"heading,omitempty"` - // Optional. For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. (Only for location) - ProximityAlertRadius int64 `json:"proximity_alert_radius,omitempty"` - // Optional. A valid URL for the MPEG4 file. File size must not exceed 1MB (Only for mpeg4_gif) - Mpeg4Url string `json:"mpeg4_url,omitempty"` - // Optional. Video width (Only for mpeg4_gif) - Mpeg4Width int64 `json:"mpeg4_width,omitempty"` - // Optional. Video height (Only for mpeg4_gif) - Mpeg4Height int64 `json:"mpeg4_height,omitempty"` - // Optional. Video duration in seconds (Only for mpeg4_gif) - Mpeg4Duration int64 `json:"mpeg4_duration,omitempty"` - // Optional. A valid URL of the photo. Photo must be in JPEG format. Photo size must not exceed 5MB (Only for photo) - PhotoUrl string `json:"photo_url,omitempty"` - // Optional. Width of the photo (Only for photo) - PhotoWidth int64 `json:"photo_width,omitempty"` - // Optional. Height of the photo (Only for photo) - PhotoHeight int64 `json:"photo_height,omitempty"` - // Optional. Address of the venue (Only for venue) - Address string `json:"address,omitempty"` - // Optional. Foursquare identifier of the venue if known (Only for venue) - FoursquareId string `json:"foursquare_id,omitempty"` - // Optional. Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) (Only for venue) - FoursquareType string `json:"foursquare_type,omitempty"` - // Optional. 
Google Places identifier of the venue (Only for venue) - GooglePlaceId string `json:"google_place_id,omitempty"` - // Optional. Google Places type of the venue. (See supported types.) (Only for venue) - GooglePlaceType string `json:"google_place_type,omitempty"` - // Optional. A valid URL for the embedded video player or video file (Only for video) - VideoUrl string `json:"video_url,omitempty"` - // Optional. Video width (Only for video) - VideoWidth int64 `json:"video_width,omitempty"` - // Optional. Video height (Only for video) - VideoHeight int64 `json:"video_height,omitempty"` - // Optional. Video duration in seconds (Only for video) - VideoDuration int64 `json:"video_duration,omitempty"` - // Optional. A valid URL for the voice recording (Only for voice) - VoiceUrl string `json:"voice_url,omitempty"` - // Optional. Recording duration in seconds (Only for voice) - VoiceDuration int64 `json:"voice_duration,omitempty"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MergedInlineQueryResult) GetType() string { - return v.Type -} - -// GetId is a helper method to easily access the common fields of an interface. -func (v MergedInlineQueryResult) GetId() string { - return v.Id -} - -// MergedInlineQueryResult.inlineQueryResult is a dummy method to avoid interface implementation. -func (v MergedInlineQueryResult) inlineQueryResult() {} - -// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world. -func (v MergedInlineQueryResult) MergeInlineQueryResult() MergedInlineQueryResult { - return v -} - -// InlineQueryResultArticle Represents a link to an article or web page. -// https://core.telegram.org/bots/api#inlinequeryresultarticle -type InlineQueryResultArticle struct { - // Unique identifier for this result, 1-64 Bytes - Id string `json:"id"` - // Title of the result - Title string `json:"title"` - // Content of the message to be sent - InputMessageContent InputMessageContent `json:"input_message_content"` - // Optional. Inline keyboard attached to the message - ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"` - // Optional. URL of the result - Url string `json:"url,omitempty"` - // Optional. Pass True if you don't want the URL to be shown in the message - HideUrl bool `json:"hide_url,omitempty"` - // Optional. Short description of the result - Description string `json:"description,omitempty"` - // Optional. Url of the thumbnail for the result - ThumbUrl string `json:"thumb_url,omitempty"` - // Optional. Thumbnail width - ThumbWidth int64 `json:"thumb_width,omitempty"` - // Optional. Thumbnail height - ThumbHeight int64 `json:"thumb_height,omitempty"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InlineQueryResultArticle) GetType() string { - return "article" -} - -// GetId is a helper method to easily access the common fields of an interface. -func (v InlineQueryResultArticle) GetId() string { - return v.Id -} - -// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world. 
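// Editorial aside: a minimal illustrative sketch, not part of the generated file. The Merge* pattern flattens every subtype into MergedInlineQueryResult so callers can read common fields without a type switch. Assuming the library's InputTextMessageContent (defined elsewhere in this file) satisfies InputMessageContent:
//
//	res := InlineQueryResultArticle{
//		Id:                  "1",
//		Title:               "Hello",
//		InputMessageContent: InputTextMessageContent{MessageText: "Hello, world"},
//	}
//	m := res.MergeInlineQueryResult()
//	fmt.Println(m.Type, m.Title) // "article" "Hello"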
-
-// InlineQueryResultArticle Represents a link to an article or web page.
-// https://core.telegram.org/bots/api#inlinequeryresultarticle
-type InlineQueryResultArticle struct {
-	// Unique identifier for this result, 1-64 Bytes
-	Id string `json:"id"`
-	// Title of the result
-	Title string `json:"title"`
-	// Content of the message to be sent
-	InputMessageContent InputMessageContent `json:"input_message_content"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. URL of the result
-	Url string `json:"url,omitempty"`
-	// Optional. Pass True if you don't want the URL to be shown in the message
-	HideUrl bool `json:"hide_url,omitempty"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Url of the thumbnail for the result
-	ThumbUrl string `json:"thumb_url,omitempty"`
-	// Optional. Thumbnail width
-	ThumbWidth int64 `json:"thumb_width,omitempty"`
-	// Optional. Thumbnail height
-	ThumbHeight int64 `json:"thumb_height,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultArticle) GetType() string {
-	return "article"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultArticle) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultArticle) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "article",
-		Id: v.Id,
-		Title: v.Title,
-		InputMessageContent: &v.InputMessageContent,
-		ReplyMarkup: v.ReplyMarkup,
-		Url: v.Url,
-		HideUrl: v.HideUrl,
-		Description: v.Description,
-		ThumbUrl: v.ThumbUrl,
-		ThumbWidth: v.ThumbWidth,
-		ThumbHeight: v.ThumbHeight,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultArticle) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultArticle
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "article",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultArticle.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultArticle) inlineQueryResult() {}
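The MarshalJSON above is the pattern used throughout this file: the local `type alias` declaration drops the method set, so the json.Marshal call inside the method cannot recurse back into MarshalJSON, while the wrapping anonymous struct injects the `type` discriminator that the exported struct deliberately omits. A hedged usage sketch (all values invented; encoding/json and fmt imports assumed, InputTextMessageContent is defined further down this file):

article := gotgbot.InlineQueryResultArticle{
	Id:    "result-1", // must be unique within one query answer, 1-64 bytes
	Title: "Bot API docs",
	InputMessageContent: gotgbot.InputTextMessageContent{
		MessageText: "https://core.telegram.org/bots/api",
	},
}
b, _ := json.Marshal(article)
// Prints a "type" key even though InlineQueryResultArticle has no Type field:
// {"type":"article","id":"result-1","title":"Bot API docs",...}
fmt.Println(string(b))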
-
-// InlineQueryResultAudio Represents a link to an MP3 audio file. By default, this audio file will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the audio.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultaudio
-type InlineQueryResultAudio struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL for the audio file
-	AudioUrl string `json:"audio_url"`
-	// Title
-	Title string `json:"title"`
-	// Optional. Caption, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the audio caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Performer
-	Performer string `json:"performer,omitempty"`
-	// Optional. Audio duration in seconds
-	AudioDuration int64 `json:"audio_duration,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the audio
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultAudio) GetType() string {
-	return "audio"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultAudio) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultAudio) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "audio",
-		Id: v.Id,
-		AudioUrl: v.AudioUrl,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		Performer: v.Performer,
-		AudioDuration: v.AudioDuration,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultAudio) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultAudio
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "audio",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultAudio.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultAudio) inlineQueryResult() {}
-
-// InlineQueryResultCachedAudio Represents a link to an MP3 audio file stored on the Telegram servers. By default, this audio file will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the audio.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedaudio
-type InlineQueryResultCachedAudio struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier for the audio file
-	AudioFileId string `json:"audio_file_id"`
-	// Optional. Caption, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the audio caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the audio
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedAudio) GetType() string {
-	return "audio"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedAudio) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedAudio) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "audio",
-		Id: v.Id,
-		AudioFileId: v.AudioFileId,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedAudio) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedAudio
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "audio",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedAudio.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedAudio) inlineQueryResult() {}
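The Cached* family that begins here differs from the URL-based results only in referencing content already on Telegram's servers by file_id, which avoids any re-download by Telegram. A small sketch (the file_id is a placeholder; real ones come from a previously received Message):

cached := gotgbot.InlineQueryResultCachedAudio{
	Id:          "result-2",
	AudioFileId: "CQACAgIAAxk...", // placeholder file_id, e.g. from msg.Audio.FileId
	Caption:     "Same upload, reused",
}
_ = cached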
-
-// InlineQueryResultCachedDocument Represents a link to a file stored on the Telegram servers. By default, this file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the file.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultcacheddocument
-type InlineQueryResultCachedDocument struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// Title for the result
-	Title string `json:"title"`
-	// A valid file identifier for the file
-	DocumentFileId string `json:"document_file_id"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Caption of the document to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the document caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the file
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedDocument) GetType() string {
-	return "document"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedDocument) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedDocument) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "document",
-		Id: v.Id,
-		Title: v.Title,
-		DocumentFileId: v.DocumentFileId,
-		Description: v.Description,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedDocument) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedDocument
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "document",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedDocument.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedDocument) inlineQueryResult() {}
-
-// InlineQueryResultCachedGif Represents a link to an animated GIF file stored on the Telegram servers. By default, this animated GIF file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with specified content instead of the animation.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedgif
-type InlineQueryResultCachedGif struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier for the GIF file
-	GifFileId string `json:"gif_file_id"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Caption of the GIF file to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the GIF animation
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedGif) GetType() string {
-	return "gif"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedGif) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedGif) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "gif",
-		Id: v.Id,
-		GifFileId: v.GifFileId,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedGif) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedGif
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "gif",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedGif.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedGif) inlineQueryResult() {}
-
-// InlineQueryResultCachedMpeg4Gif Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the animation.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedmpeg4gif
-type InlineQueryResultCachedMpeg4Gif struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier for the MPEG4 file
-	Mpeg4FileId string `json:"mpeg4_file_id"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Caption of the MPEG-4 file to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the video animation
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedMpeg4Gif) GetType() string {
-	return "mpeg4_gif"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedMpeg4Gif) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedMpeg4Gif) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "mpeg4_gif",
-		Id: v.Id,
-		Mpeg4FileId: v.Mpeg4FileId,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedMpeg4Gif) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedMpeg4Gif
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "mpeg4_gif",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedMpeg4Gif.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedMpeg4Gif) inlineQueryResult() {}
-
-// InlineQueryResultCachedPhoto Represents a link to a photo stored on the Telegram servers. By default, this photo will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the photo.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedphoto
-type InlineQueryResultCachedPhoto struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier of the photo
-	PhotoFileId string `json:"photo_file_id"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Caption of the photo to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the photo caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the photo
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedPhoto) GetType() string {
-	return "photo"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedPhoto) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedPhoto) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "photo",
-		Id: v.Id,
-		PhotoFileId: v.PhotoFileId,
-		Title: v.Title,
-		Description: v.Description,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedPhoto) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedPhoto
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "photo",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedPhoto.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedPhoto) inlineQueryResult() {}
-
-// InlineQueryResultCachedSticker Represents a link to a sticker stored on the Telegram servers. By default, this sticker will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the sticker.
-// Note: This will only work in Telegram versions released after 9 April, 2016 for static stickers and after 06 July, 2019 for animated stickers. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedsticker
-type InlineQueryResultCachedSticker struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier of the sticker
-	StickerFileId string `json:"sticker_file_id"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the sticker
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedSticker) GetType() string {
-	return "sticker"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedSticker) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedSticker) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "sticker",
-		Id: v.Id,
-		StickerFileId: v.StickerFileId,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedSticker) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedSticker
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "sticker",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedSticker.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedSticker) inlineQueryResult() {}
-
-// InlineQueryResultCachedVideo Represents a link to a video file stored on the Telegram servers. By default, this video file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the video.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedvideo
-type InlineQueryResultCachedVideo struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier for the video file
-	VideoFileId string `json:"video_file_id"`
-	// Title for the result
-	Title string `json:"title"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Caption of the video to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the video caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the video
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedVideo) GetType() string {
-	return "video"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedVideo) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedVideo) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "video",
-		Id: v.Id,
-		VideoFileId: v.VideoFileId,
-		Title: v.Title,
-		Description: v.Description,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedVideo) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedVideo
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "video",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedVideo.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedVideo) inlineQueryResult() {}
-
-// InlineQueryResultCachedVoice Represents a link to a voice message stored on the Telegram servers. By default, this voice message will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the voice message.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultcachedvoice
-type InlineQueryResultCachedVoice struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid file identifier for the voice message
-	VoiceFileId string `json:"voice_file_id"`
-	// Voice message title
-	Title string `json:"title"`
-	// Optional. Caption, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the voice message caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the voice message
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedVoice) GetType() string {
-	return "voice"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultCachedVoice) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultCachedVoice) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "voice",
-		Id: v.Id,
-		VoiceFileId: v.VoiceFileId,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultCachedVoice) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultCachedVoice
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "voice",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultCachedVoice.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultCachedVoice) inlineQueryResult() {}
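All of these results are ultimately handed to the Bot API's answerInlineQuery method, whose generated binding lives in gen_methods.go (removed above in this same patch). Roughly, with gotgbot's ext handler types; the signature is recalled from the v2 API, so treat this as a sketch:

func handleQuery(b *gotgbot.Bot, ctx *ext.Context) error {
	results := []gotgbot.InlineQueryResult{
		gotgbot.InlineQueryResultCachedVoice{Id: "1", VoiceFileId: "AwACAgIA...", Title: "hi"}, // placeholder file_id
	}
	// nil opts: no caching or pagination options.
	_, err := b.AnswerInlineQuery(ctx.InlineQuery.Id, results, nil)
	return err
}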
-
-// InlineQueryResultContact Represents a contact with a phone number. By default, this contact will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the contact.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultcontact
-type InlineQueryResultContact struct {
-	// Unique identifier for this result, 1-64 Bytes
-	Id string `json:"id"`
-	// Contact's phone number
-	PhoneNumber string `json:"phone_number"`
-	// Contact's first name
-	FirstName string `json:"first_name"`
-	// Optional. Contact's last name
-	LastName string `json:"last_name,omitempty"`
-	// Optional. Additional data about the contact in the form of a vCard, 0-2048 bytes
-	Vcard string `json:"vcard,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the contact
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-	// Optional. Url of the thumbnail for the result
-	ThumbUrl string `json:"thumb_url,omitempty"`
-	// Optional. Thumbnail width
-	ThumbWidth int64 `json:"thumb_width,omitempty"`
-	// Optional. Thumbnail height
-	ThumbHeight int64 `json:"thumb_height,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultContact) GetType() string {
-	return "contact"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultContact) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultContact) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "contact",
-		Id: v.Id,
-		PhoneNumber: v.PhoneNumber,
-		FirstName: v.FirstName,
-		LastName: v.LastName,
-		Vcard: v.Vcard,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-		ThumbUrl: v.ThumbUrl,
-		ThumbWidth: v.ThumbWidth,
-		ThumbHeight: v.ThumbHeight,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultContact) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultContact
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "contact",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultContact.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultContact) inlineQueryResult() {}
-
-// InlineQueryResultDocument Represents a link to a file. By default, this file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the file. Currently, only .PDF and .ZIP files can be sent using this method.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultdocument
-type InlineQueryResultDocument struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// Title for the result
-	Title string `json:"title"`
-	// Optional. Caption of the document to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the document caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// A valid URL for the file
-	DocumentUrl string `json:"document_url"`
-	// MIME type of the content of the file, either "application/pdf" or "application/zip"
-	MimeType string `json:"mime_type"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the file
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-	// Optional. URL of the thumbnail (JPEG only) for the file
-	ThumbUrl string `json:"thumb_url,omitempty"`
-	// Optional. Thumbnail width
-	ThumbWidth int64 `json:"thumb_width,omitempty"`
-	// Optional. Thumbnail height
-	ThumbHeight int64 `json:"thumb_height,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultDocument) GetType() string {
-	return "document"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultDocument) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultDocument) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "document",
-		Id: v.Id,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		DocumentUrl: v.DocumentUrl,
-		MimeType: v.MimeType,
-		Description: v.Description,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-		ThumbUrl: v.ThumbUrl,
-		ThumbWidth: v.ThumbWidth,
-		ThumbHeight: v.ThumbHeight,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultDocument) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultDocument
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "document",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultDocument.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultDocument) inlineQueryResult() {}
-
-// InlineQueryResultGame Represents a Game.
-// Note: This will only work in Telegram versions released after October 1, 2016. Older clients will not display any inline results if a game result is among them.
-// https://core.telegram.org/bots/api#inlinequeryresultgame
-type InlineQueryResultGame struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// Short name of the game
-	GameShortName string `json:"game_short_name"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultGame) GetType() string {
-	return "game"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultGame) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultGame) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "game",
-		Id: v.Id,
-		GameShortName: v.GameShortName,
-		ReplyMarkup: v.ReplyMarkup,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultGame) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultGame
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "game",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultGame.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultGame) inlineQueryResult() {}
-
-// InlineQueryResultGif Represents a link to an animated GIF file. By default, this animated GIF file will be sent by the user with optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the animation.
-// https://core.telegram.org/bots/api#inlinequeryresultgif
-type InlineQueryResultGif struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL for the GIF file. File size must not exceed 1MB
-	GifUrl string `json:"gif_url"`
-	// Optional. Width of the GIF
-	GifWidth int64 `json:"gif_width,omitempty"`
-	// Optional. Height of the GIF
-	GifHeight int64 `json:"gif_height,omitempty"`
-	// Optional. Duration of the GIF in seconds
-	GifDuration int64 `json:"gif_duration,omitempty"`
-	// URL of the static (JPEG or GIF) or animated (MPEG4) thumbnail for the result
-	ThumbUrl string `json:"thumb_url"`
-	// Optional. MIME type of the thumbnail, must be one of "image/jpeg", "image/gif", or "video/mp4". Defaults to "image/jpeg"
-	ThumbMimeType string `json:"thumb_mime_type,omitempty"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Caption of the GIF file to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the GIF animation
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultGif) GetType() string {
-	return "gif"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultGif) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultGif) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "gif",
-		Id: v.Id,
-		GifUrl: v.GifUrl,
-		GifWidth: v.GifWidth,
-		GifHeight: v.GifHeight,
-		GifDuration: v.GifDuration,
-		ThumbUrl: v.ThumbUrl,
-		ThumbMimeType: v.ThumbMimeType,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultGif) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultGif
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "gif",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultGif.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultGif) inlineQueryResult() {}
-
-// InlineQueryResultLocation Represents a location on a map. By default, the location will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the location.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultlocation
-type InlineQueryResultLocation struct {
-	// Unique identifier for this result, 1-64 Bytes
-	Id string `json:"id"`
-	// Location latitude in degrees
-	Latitude float64 `json:"latitude"`
-	// Location longitude in degrees
-	Longitude float64 `json:"longitude"`
-	// Location title
-	Title string `json:"title"`
-	// Optional. The radius of uncertainty for the location, measured in meters; 0-1500
-	HorizontalAccuracy float64 `json:"horizontal_accuracy,omitempty"`
-	// Optional. Period in seconds for which the location can be updated, should be between 60 and 86400.
-	LivePeriod int64 `json:"live_period,omitempty"`
-	// Optional. For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.
-	Heading int64 `json:"heading,omitempty"`
-	// Optional. For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.
-	ProximityAlertRadius int64 `json:"proximity_alert_radius,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the location
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-	// Optional. Url of the thumbnail for the result
-	ThumbUrl string `json:"thumb_url,omitempty"`
-	// Optional. Thumbnail width
-	ThumbWidth int64 `json:"thumb_width,omitempty"`
-	// Optional. Thumbnail height
-	ThumbHeight int64 `json:"thumb_height,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultLocation) GetType() string {
-	return "location"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultLocation) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultLocation) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "location",
-		Id: v.Id,
-		Latitude: v.Latitude,
-		Longitude: v.Longitude,
-		Title: v.Title,
-		HorizontalAccuracy: v.HorizontalAccuracy,
-		LivePeriod: v.LivePeriod,
-		Heading: v.Heading,
-		ProximityAlertRadius: v.ProximityAlertRadius,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-		ThumbUrl: v.ThumbUrl,
-		ThumbWidth: v.ThumbWidth,
-		ThumbHeight: v.ThumbHeight,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultLocation) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultLocation
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "location",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultLocation.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultLocation) inlineQueryResult() {}
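The live-location bounds quoted in the field comments above (60-86400 s for live_period, 1-360 degrees for heading, 0-1500 m accuracy, up to 100000 m alert radius) are validated by Telegram, not by this package, so it is easiest to keep a result valid by construction; a small sketch with invented values:

loc := gotgbot.InlineQueryResultLocation{
	Id:        "loc-1",
	Latitude:  52.52,
	Longitude: 13.405,
	Title:     "Berlin",
	LivePeriod:           3600, // within 60..86400 seconds
	Heading:              90,   // within 1..360 degrees
	ProximityAlertRadius: 500,  // within 1..100000 meters
}
_ = loc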
-
-// InlineQueryResultMpeg4Gif Represents a link to a video animation (H.264/MPEG-4 AVC video without sound). By default, this animated MPEG-4 file will be sent by the user with optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the animation.
-// https://core.telegram.org/bots/api#inlinequeryresultmpeg4gif
-type InlineQueryResultMpeg4Gif struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL for the MPEG4 file. File size must not exceed 1MB
-	Mpeg4Url string `json:"mpeg4_url"`
-	// Optional. Video width
-	Mpeg4Width int64 `json:"mpeg4_width,omitempty"`
-	// Optional. Video height
-	Mpeg4Height int64 `json:"mpeg4_height,omitempty"`
-	// Optional. Video duration in seconds
-	Mpeg4Duration int64 `json:"mpeg4_duration,omitempty"`
-	// URL of the static (JPEG or GIF) or animated (MPEG4) thumbnail for the result
-	ThumbUrl string `json:"thumb_url"`
-	// Optional. MIME type of the thumbnail, must be one of "image/jpeg", "image/gif", or "video/mp4". Defaults to "image/jpeg"
-	ThumbMimeType string `json:"thumb_mime_type,omitempty"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Caption of the MPEG-4 file to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the video animation
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultMpeg4Gif) GetType() string {
-	return "mpeg4_gif"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultMpeg4Gif) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultMpeg4Gif) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "mpeg4_gif",
-		Id: v.Id,
-		Mpeg4Url: v.Mpeg4Url,
-		Mpeg4Width: v.Mpeg4Width,
-		Mpeg4Height: v.Mpeg4Height,
-		Mpeg4Duration: v.Mpeg4Duration,
-		ThumbUrl: v.ThumbUrl,
-		ThumbMimeType: v.ThumbMimeType,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultMpeg4Gif) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultMpeg4Gif
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "mpeg4_gif",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultMpeg4Gif.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultMpeg4Gif) inlineQueryResult() {}
-
-// InlineQueryResultPhoto Represents a link to a photo. By default, this photo will be sent by the user with optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the photo.
-// https://core.telegram.org/bots/api#inlinequeryresultphoto
-type InlineQueryResultPhoto struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL of the photo. Photo must be in JPEG format. Photo size must not exceed 5MB
-	PhotoUrl string `json:"photo_url"`
-	// URL of the thumbnail for the photo
-	ThumbUrl string `json:"thumb_url"`
-	// Optional. Width of the photo
-	PhotoWidth int64 `json:"photo_width,omitempty"`
-	// Optional. Height of the photo
-	PhotoHeight int64 `json:"photo_height,omitempty"`
-	// Optional. Title for the result
-	Title string `json:"title,omitempty"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Caption of the photo to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the photo caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the photo
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultPhoto) GetType() string {
-	return "photo"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultPhoto) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultPhoto) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "photo",
-		Id: v.Id,
-		PhotoUrl: v.PhotoUrl,
-		ThumbUrl: v.ThumbUrl,
-		PhotoWidth: v.PhotoWidth,
-		PhotoHeight: v.PhotoHeight,
-		Title: v.Title,
-		Description: v.Description,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultPhoto) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultPhoto
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "photo",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultPhoto.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultPhoto) inlineQueryResult() {}
-
-// InlineQueryResultVenue Represents a venue. By default, the venue will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the venue.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultvenue
-type InlineQueryResultVenue struct {
-	// Unique identifier for this result, 1-64 Bytes
-	Id string `json:"id"`
-	// Latitude of the venue location in degrees
-	Latitude float64 `json:"latitude"`
-	// Longitude of the venue location in degrees
-	Longitude float64 `json:"longitude"`
-	// Title of the venue
-	Title string `json:"title"`
-	// Address of the venue
-	Address string `json:"address"`
-	// Optional. Foursquare identifier of the venue if known
-	FoursquareId string `json:"foursquare_id,omitempty"`
-	// Optional. Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
-	FoursquareType string `json:"foursquare_type,omitempty"`
-	// Optional. Google Places identifier of the venue
-	GooglePlaceId string `json:"google_place_id,omitempty"`
-	// Optional. Google Places type of the venue. (See supported types.)
-	GooglePlaceType string `json:"google_place_type,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the venue
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-	// Optional. Url of the thumbnail for the result
-	ThumbUrl string `json:"thumb_url,omitempty"`
-	// Optional. Thumbnail width
-	ThumbWidth int64 `json:"thumb_width,omitempty"`
-	// Optional. Thumbnail height
-	ThumbHeight int64 `json:"thumb_height,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVenue) GetType() string {
-	return "venue"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVenue) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultVenue) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "venue",
-		Id: v.Id,
-		Latitude: v.Latitude,
-		Longitude: v.Longitude,
-		Title: v.Title,
-		Address: v.Address,
-		FoursquareId: v.FoursquareId,
-		FoursquareType: v.FoursquareType,
-		GooglePlaceId: v.GooglePlaceId,
-		GooglePlaceType: v.GooglePlaceType,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-		ThumbUrl: v.ThumbUrl,
-		ThumbWidth: v.ThumbWidth,
-		ThumbHeight: v.ThumbHeight,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultVenue) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultVenue
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "venue",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultVenue.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultVenue) inlineQueryResult() {}
-
-// InlineQueryResultVideo Represents a link to a page containing an embedded video player or a video file. By default, this video file will be sent by the user with an optional caption. Alternatively, you can use input_message_content to send a message with the specified content instead of the video.
-// https://core.telegram.org/bots/api#inlinequeryresultvideo
-type InlineQueryResultVideo struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL for the embedded video player or video file
-	VideoUrl string `json:"video_url"`
-	// MIME type of the content of the video URL, "text/html" or "video/mp4"
-	MimeType string `json:"mime_type"`
-	// URL of the thumbnail (JPEG only) for the video
-	ThumbUrl string `json:"thumb_url"`
-	// Title for the result
-	Title string `json:"title"`
-	// Optional. Caption of the video to be sent, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the video caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Video width
-	VideoWidth int64 `json:"video_width,omitempty"`
-	// Optional. Video height
-	VideoHeight int64 `json:"video_height,omitempty"`
-	// Optional. Video duration in seconds
-	VideoDuration int64 `json:"video_duration,omitempty"`
-	// Optional. Short description of the result
-	Description string `json:"description,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the video. This field is required if InlineQueryResultVideo is used to send an HTML-page as a result (e.g., a YouTube video).
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVideo) GetType() string {
-	return "video"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVideo) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultVideo) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "video",
-		Id: v.Id,
-		VideoUrl: v.VideoUrl,
-		MimeType: v.MimeType,
-		ThumbUrl: v.ThumbUrl,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		VideoWidth: v.VideoWidth,
-		VideoHeight: v.VideoHeight,
-		VideoDuration: v.VideoDuration,
-		Description: v.Description,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultVideo) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultVideo
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "video",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultVideo.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultVideo) inlineQueryResult() {}
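Note the caveat on InputMessageContent above: a "text/html" video result (an embedded player such as a YouTube page) cannot be delivered as a file, so the fallback message content becomes mandatory. Because the field is a pointer to the InputMessageContent interface, assignment takes one extra step; a sketch with illustrative URLs:

var content gotgbot.InputMessageContent = gotgbot.InputTextMessageContent{
	MessageText: "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
}
video := gotgbot.InlineQueryResultVideo{
	Id:       "vid-1",
	VideoUrl: "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
	MimeType: "text/html", // embedded player, not video/mp4
	ThumbUrl: "https://i.ytimg.com/vi/dQw4w9WgXcQ/hqdefault.jpg",
	Title:    "Some video",
	InputMessageContent: &content, // required for text/html results
}
_ = video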
-
-// InlineQueryResultVoice Represents a link to a voice recording in an .OGG container encoded with OPUS. By default, this voice recording will be sent by the user. Alternatively, you can use input_message_content to send a message with the specified content instead of the voice message.
-// Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
-// https://core.telegram.org/bots/api#inlinequeryresultvoice
-type InlineQueryResultVoice struct {
-	// Unique identifier for this result, 1-64 bytes
-	Id string `json:"id"`
-	// A valid URL for the voice recording
-	VoiceUrl string `json:"voice_url"`
-	// Recording title
-	Title string `json:"title"`
-	// Optional. Caption, 0-1024 characters after entities parsing
-	Caption string `json:"caption,omitempty"`
-	// Optional. Mode for parsing entities in the voice message caption. See formatting options for more details.
-	ParseMode string `json:"parse_mode,omitempty"`
-	// Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode
-	CaptionEntities []MessageEntity `json:"caption_entities,omitempty"`
-	// Optional. Recording duration in seconds
-	VoiceDuration int64 `json:"voice_duration,omitempty"`
-	// Optional. Inline keyboard attached to the message
-	ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"`
-	// Optional. Content of the message to be sent instead of the voice recording
-	InputMessageContent *InputMessageContent `json:"input_message_content,omitempty"`
-}
-
-// GetType is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVoice) GetType() string {
-	return "voice"
-}
-
-// GetId is a helper method to easily access the common fields of an interface.
-func (v InlineQueryResultVoice) GetId() string {
-	return v.Id
-}
-
-// MergeInlineQueryResult returns a MergedInlineQueryResult struct to simplify working with types in a non-generic world.
-func (v InlineQueryResultVoice) MergeInlineQueryResult() MergedInlineQueryResult {
-	return MergedInlineQueryResult{
-		Type: "voice",
-		Id: v.Id,
-		VoiceUrl: v.VoiceUrl,
-		Title: v.Title,
-		Caption: v.Caption,
-		ParseMode: v.ParseMode,
-		CaptionEntities: v.CaptionEntities,
-		VoiceDuration: v.VoiceDuration,
-		ReplyMarkup: v.ReplyMarkup,
-		InputMessageContent: v.InputMessageContent,
-	}
-}
-
-// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value.
-func (v InlineQueryResultVoice) MarshalJSON() ([]byte, error) {
-	type alias InlineQueryResultVoice
-	a := struct {
-		Type string `json:"type"`
-		alias
-	}{
-		Type: "voice",
-		alias: (alias)(v),
-	}
-	return json.Marshal(a)
-}
-
-// InlineQueryResultVoice.inlineQueryResult is a dummy method to avoid interface implementation.
-func (v InlineQueryResultVoice) inlineQueryResult() {}
-
-// InputContactMessageContent Represents the content of a contact message to be sent as the result of an inline query.
-// https://core.telegram.org/bots/api#inputcontactmessagecontent
-type InputContactMessageContent struct {
-	// Contact's phone number
-	PhoneNumber string `json:"phone_number"`
-	// Contact's first name
-	FirstName string `json:"first_name"`
-	// Optional. Contact's last name
-	LastName string `json:"last_name,omitempty"`
-	// Optional. Additional data about the contact in the form of a vCard, 0-2048 bytes
-	Vcard string `json:"vcard,omitempty"`
-}
-
-// InputContactMessageContent.inputMessageContent is a dummy method to avoid interface implementation.
-func (v InputContactMessageContent) inputMessageContent() {}
-
-// InputFile This object represents the contents of a file to be uploaded. Must be posted using multipart/form-data in the usual way that files are uploaded via the browser.
-// https://core.telegram.org/bots/api#inputfile
-type InputFile interface{}
-
-// InputInvoiceMessageContent Represents the content of an invoice message to be sent as the result of an inline query.
-// https://core.telegram.org/bots/api#inputinvoicemessagecontent
-type InputInvoiceMessageContent struct {
-	// Product name, 1-32 characters
-	Title string `json:"title"`
-	// Product description, 1-255 characters
-	Description string `json:"description"`
-	// Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes.
-	Payload string `json:"payload"`
-	// Payment provider token, obtained via @BotFather
-	ProviderToken string `json:"provider_token"`
-	// Three-letter ISO 4217 currency code, see more on currencies
-	Currency string `json:"currency"`
-	// Price breakdown, a JSON-serialized list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.)
-	Prices []LabeledPrice `json:"prices,omitempty"`
-	// Optional. The maximum accepted amount for tips in the smallest units of the currency (integer, not float/double). For example, for a maximum tip of US$ 1.45 pass max_tip_amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). Defaults to 0
-	MaxTipAmount int64 `json:"max_tip_amount,omitempty"`
-	// Optional. A JSON-serialized array of suggested amounts of tip in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount.
-	SuggestedTipAmounts []int64 `json:"suggested_tip_amounts,omitempty"`
-	// Optional. A JSON-serialized object for data about the invoice, which will be shared with the payment provider. A detailed description of the required fields should be provided by the payment provider.
-	ProviderData string `json:"provider_data,omitempty"`
-	// Optional. URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service.
-	PhotoUrl string `json:"photo_url,omitempty"`
-	// Optional. Photo size in bytes
-	PhotoSize int64 `json:"photo_size,omitempty"`
-	// Optional. Photo width
-	PhotoWidth int64 `json:"photo_width,omitempty"`
-	// Optional. Photo height
-	PhotoHeight int64 `json:"photo_height,omitempty"`
-	// Optional. Pass True if you require the user's full name to complete the order
-	NeedName bool `json:"need_name,omitempty"`
-	// Optional. Pass True if you require the user's phone number to complete the order
-	NeedPhoneNumber bool `json:"need_phone_number,omitempty"`
-	// Optional. Pass True if you require the user's email address to complete the order
-	NeedEmail bool `json:"need_email,omitempty"`
-	// Optional. Pass True if you require the user's shipping address to complete the order
-	NeedShippingAddress bool `json:"need_shipping_address,omitempty"`
-	// Optional. Pass True if the user's phone number should be sent to provider
-	SendPhoneNumberToProvider bool `json:"send_phone_number_to_provider,omitempty"`
-	// Optional. Pass True if the user's email address should be sent to provider
-	SendEmailToProvider bool `json:"send_email_to_provider,omitempty"`
-	// Optional. Pass True if the final price depends on the shipping method
-	IsFlexible bool `json:"is_flexible,omitempty"`
-}
-
-// InputInvoiceMessageContent.inputMessageContent is a dummy method to avoid interface implementation.
-func (v InputInvoiceMessageContent) inputMessageContent() {}
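The tip fields above are all integers in the currency's smallest unit, with the exponent taken from Telegram's currencies.json (2 for most currencies): US$1.45 is therefore 145. At most four suggested tips may be given, positive, strictly increasing, and none above max_tip_amount. For example (the provider token is a placeholder):

invoice := gotgbot.InputInvoiceMessageContent{
	Title:         "Pro subscription",
	Description:   "One month of Pro",
	Payload:       "sub-pro-1m",       // internal; never shown to the user
	ProviderToken: "<provider-token>", // placeholder, issued by @BotFather
	Currency:      "USD",
	Prices:        []gotgbot.LabeledPrice{{Label: "Pro", Amount: 999}}, // US$9.99
	MaxTipAmount:  145,                                                 // US$1.45
	SuggestedTipAmounts: []int64{50, 100, 145}, // ascending, each <= MaxTipAmount
}
_ = invoice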
A JSON-serialized array of suggested amounts of tip in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount. - SuggestedTipAmounts []int64 `json:"suggested_tip_amounts,omitempty"` - // Optional. A JSON-serialized object for data about the invoice, which will be shared with the payment provider. A detailed description of the required fields should be provided by the payment provider. - ProviderData string `json:"provider_data,omitempty"` - // Optional. URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service. - PhotoUrl string `json:"photo_url,omitempty"` - // Optional. Photo size in bytes - PhotoSize int64 `json:"photo_size,omitempty"` - // Optional. Photo width - PhotoWidth int64 `json:"photo_width,omitempty"` - // Optional. Photo height - PhotoHeight int64 `json:"photo_height,omitempty"` - // Optional. Pass True if you require the user's full name to complete the order - NeedName bool `json:"need_name,omitempty"` - // Optional. Pass True if you require the user's phone number to complete the order - NeedPhoneNumber bool `json:"need_phone_number,omitempty"` - // Optional. Pass True if you require the user's email address to complete the order - NeedEmail bool `json:"need_email,omitempty"` - // Optional. Pass True if you require the user's shipping address to complete the order - NeedShippingAddress bool `json:"need_shipping_address,omitempty"` - // Optional. Pass True if the user's phone number should be sent to provider - SendPhoneNumberToProvider bool `json:"send_phone_number_to_provider,omitempty"` - // Optional. Pass True if the user's email address should be sent to provider - SendEmailToProvider bool `json:"send_email_to_provider,omitempty"` - // Optional. Pass True if the final price depends on the shipping method - IsFlexible bool `json:"is_flexible,omitempty"` -} - -// InputInvoiceMessageContent.inputMessageContent is a dummy method to avoid interface implementation. -func (v InputInvoiceMessageContent) inputMessageContent() {} - -// InputLocationMessageContent Represents the content of a location message to be sent as the result of an inline query. -// https://core.telegram.org/bots/api#inputlocationmessagecontent -type InputLocationMessageContent struct { - // Latitude of the location in degrees - Latitude float64 `json:"latitude"` - // Longitude of the location in degrees - Longitude float64 `json:"longitude"` - // Optional. The radius of uncertainty for the location, measured in meters; 0-1500 - HorizontalAccuracy float64 `json:"horizontal_accuracy,omitempty"` - // Optional. Period in seconds for which the location can be updated, should be between 60 and 86400. - LivePeriod int64 `json:"live_period,omitempty"` - // Optional. For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. - Heading int64 `json:"heading,omitempty"` - // Optional. For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. - ProximityAlertRadius int64 `json:"proximity_alert_radius,omitempty"` -} - -// InputLocationMessageContent.inputMessageContent is a dummy method to avoid interface implementation. 
-func (v InputLocationMessageContent) inputMessageContent() {} - -// InputMedia This object represents the content of a media message to be sent. It should be one of -// - InputMediaAnimation -// - InputMediaDocument -// - InputMediaAudio -// - InputMediaPhoto -// - InputMediaVideo -// https://core.telegram.org/bots/api#inputmedia -type InputMedia interface { - GetType() string - GetMedia() InputFile - inputMedia() - // InputMediaParams allows for uploading InputMedia files with attachments. - InputMediaParams(string, map[string]NamedReader) ([]byte, error) - // MergeInputMedia returns a MergedInputMedia struct to simplify working with complex telegram types in a non-generic world. - MergeInputMedia() MergedInputMedia -} - -// MergedInputMedia is a helper type to simplify interactions with the various InputMedia subtypes. -type MergedInputMedia struct { - // Type of the result, must be animation - Type string `json:"type"` - // File to send. Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files (Only for animation, document, audio, video) - Thumb *InputFile `json:"thumb,omitempty"` - // Optional. Caption of the animation to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the animation caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Animation width (Only for animation, video) - Width int64 `json:"width,omitempty"` - // Optional. Animation height (Only for animation, video) - Height int64 `json:"height,omitempty"` - // Optional. Animation duration in seconds (Only for animation, audio, video) - Duration int64 `json:"duration,omitempty"` - // Optional. Disables automatic server-side content type detection for files uploaded using multipart/form-data. Always True, if the document is sent as part of an album. (Only for document) - DisableContentTypeDetection bool `json:"disable_content_type_detection,omitempty"` - // Optional. Performer of the audio (Only for audio) - Performer string `json:"performer,omitempty"` - // Optional. Title of the audio (Only for audio) - Title string `json:"title,omitempty"` - // Optional. Pass True if the uploaded video is suitable for streaming (Only for video) - SupportsStreaming bool `json:"supports_streaming,omitempty"` -} - -// GetType is a helper method to easily access the common fields of an interface.
-func (v MergedInputMedia) GetType() string { - return v.Type -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v MergedInputMedia) GetMedia() InputFile { - return v.Media -} - -// MergedInputMedia.inputMedia is a dummy method to avoid interface implementation. -func (v MergedInputMedia) inputMedia() {} - -// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world. -func (v MergedInputMedia) MergeInputMedia() MergedInputMedia { - return v -} - -// InputMediaAnimation Represents an animation file (GIF or H.264/MPEG-4 AVC video without sound) to be sent. -// https://core.telegram.org/bots/api#inputmediaanimation -type InputMediaAnimation struct { - // File to send. Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb *InputFile `json:"thumb,omitempty"` - // Optional. Caption of the animation to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the animation caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Animation width - Width int64 `json:"width,omitempty"` - // Optional. Animation height - Height int64 `json:"height,omitempty"` - // Optional. Animation duration in seconds - Duration int64 `json:"duration,omitempty"` -} - -func (v InputMediaAnimation) InputMediaParams(mediaName string, data map[string]NamedReader) ([]byte, error) { - if v.Media != nil { - switch m := v.Media.(type) { - case string: - // ok, noop - - case NamedReader: - v.Media = "attach://" + mediaName - data[mediaName] = m - - case io.Reader: - v.Media = "attach://" + mediaName - data[mediaName] = NamedFile{File: m} - - default: - return nil, fmt.Errorf("unknown type for InputMedia: %T", v.Media) - } - } - - return json.Marshal(v) -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InputMediaAnimation) GetType() string { - return "animation" -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v InputMediaAnimation) GetMedia() InputFile { - return v.Media -} -
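The InputMediaParams implementations in this file are what drive the "attach://<file_attach_name>" mechanism: a string Media value passes through untouched as a file_id or URL, while any reader is registered in the multipart data map and the media field is rewritten to reference it. A minimal sketch, assuming a hypothetical photo.jpg on disk (*os.File already satisfies NamedReader, since it has both Read and Name):

package main

import (
	"fmt"
	"os"

	"github.com/PaulSonOfLars/gotgbot/v2"
)

func main() {
	f, err := os.Open("photo.jpg") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	data := map[string]gotgbot.NamedReader{}
	m := gotgbot.InputMediaPhoto{Media: f}

	// The reader is stored under "media0" and Media is rewritten to point at it,
	// ready to be sent as a multipart/form-data part.
	payload, err := m.InputMediaParams("media0", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // {"type":"photo","media":"attach://media0"}
}

-// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world.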
-func (v InputMediaAnimation) MergeInputMedia() MergedInputMedia { - return MergedInputMedia{ - Type: "animation", - Media: v.Media, - Thumb: v.Thumb, - Caption: v.Caption, - ParseMode: v.ParseMode, - CaptionEntities: v.CaptionEntities, - Width: v.Width, - Height: v.Height, - Duration: v.Duration, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v InputMediaAnimation) MarshalJSON() ([]byte, error) { - type alias InputMediaAnimation - a := struct { - Type string `json:"type"` - alias - }{ - Type: "animation", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// InputMediaAnimation.inputMedia is a dummy method to avoid interface implementation. -func (v InputMediaAnimation) inputMedia() {} - -// InputMediaAudio Represents an audio file to be treated as music to be sent. -// https://core.telegram.org/bots/api#inputmediaaudio -type InputMediaAudio struct { - // File to send. Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb *InputFile `json:"thumb,omitempty"` - // Optional. Caption of the audio to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the audio caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Duration of the audio in seconds - Duration int64 `json:"duration,omitempty"` - // Optional. Performer of the audio - Performer string `json:"performer,omitempty"` - // Optional. Title of the audio - Title string `json:"title,omitempty"` -} - -func (v InputMediaAudio) InputMediaParams(mediaName string, data map[string]NamedReader) ([]byte, error) { - if v.Media != nil { - switch m := v.Media.(type) { - case string: - // ok, noop - - case NamedReader: - v.Media = "attach://" + mediaName - data[mediaName] = m - - case io.Reader: - v.Media = "attach://" + mediaName - data[mediaName] = NamedFile{File: m} - - default: - return nil, fmt.Errorf("unknown type for InputMedia: %T", v.Media) - } - } - - return json.Marshal(v) -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InputMediaAudio) GetType() string { - return "audio" -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v InputMediaAudio) GetMedia() InputFile { - return v.Media -} - -// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world.
-func (v InputMediaAudio) MergeInputMedia() MergedInputMedia { - return MergedInputMedia{ - Type: "audio", - Media: v.Media, - Thumb: v.Thumb, - Caption: v.Caption, - ParseMode: v.ParseMode, - CaptionEntities: v.CaptionEntities, - Duration: v.Duration, - Performer: v.Performer, - Title: v.Title, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v InputMediaAudio) MarshalJSON() ([]byte, error) { - type alias InputMediaAudio - a := struct { - Type string `json:"type"` - alias - }{ - Type: "audio", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// InputMediaAudio.inputMedia is a dummy method to avoid interface implementation. -func (v InputMediaAudio) inputMedia() {} - -// InputMediaDocument Represents a general file to be sent. -// https://core.telegram.org/bots/api#inputmediadocument -type InputMediaDocument struct { - // File to send. Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb *InputFile `json:"thumb,omitempty"` - // Optional. Caption of the document to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the document caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Disables automatic server-side content type detection for files uploaded using multipart/form-data. Always True, if the document is sent as part of an album. - DisableContentTypeDetection bool `json:"disable_content_type_detection,omitempty"` -} - -func (v InputMediaDocument) InputMediaParams(mediaName string, data map[string]NamedReader) ([]byte, error) { - if v.Media != nil { - switch m := v.Media.(type) { - case string: - // ok, noop - - case NamedReader: - v.Media = "attach://" + mediaName - data[mediaName] = m - - case io.Reader: - v.Media = "attach://" + mediaName - data[mediaName] = NamedFile{File: m} - - default: - return nil, fmt.Errorf("unknown type for InputMedia: %T", v.Media) - } - } - - return json.Marshal(v) -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InputMediaDocument) GetType() string { - return "document" -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v InputMediaDocument) GetMedia() InputFile { - return v.Media -} - -// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world.
-func (v InputMediaDocument) MergeInputMedia() MergedInputMedia { - return MergedInputMedia{ - Type: "document", - Media: v.Media, - Thumb: v.Thumb, - Caption: v.Caption, - ParseMode: v.ParseMode, - CaptionEntities: v.CaptionEntities, - DisableContentTypeDetection: v.DisableContentTypeDetection, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v InputMediaDocument) MarshalJSON() ([]byte, error) { - type alias InputMediaDocument - a := struct { - Type string `json:"type"` - alias - }{ - Type: "document", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// InputMediaDocument.inputMedia is a dummy method to avoid interface implementation. -func (v InputMediaDocument) inputMedia() {} - -// InputMediaPhoto Represents a photo to be sent. -// https://core.telegram.org/bots/api#inputmediaphoto -type InputMediaPhoto struct { - // File to send. Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Caption of the photo to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the photo caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` -} - -func (v InputMediaPhoto) InputMediaParams(mediaName string, data map[string]NamedReader) ([]byte, error) { - if v.Media != nil { - switch m := v.Media.(type) { - case string: - // ok, noop - - case NamedReader: - v.Media = "attach://" + mediaName - data[mediaName] = m - - case io.Reader: - v.Media = "attach://" + mediaName - data[mediaName] = NamedFile{File: m} - - default: - return nil, fmt.Errorf("unknown type for InputMedia: %T", v.Media) - } - } - - return json.Marshal(v) -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InputMediaPhoto) GetType() string { - return "photo" -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v InputMediaPhoto) GetMedia() InputFile { - return v.Media -} - -// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world. -func (v InputMediaPhoto) MergeInputMedia() MergedInputMedia { - return MergedInputMedia{ - Type: "photo", - Media: v.Media, - Caption: v.Caption, - ParseMode: v.ParseMode, - CaptionEntities: v.CaptionEntities, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v InputMediaPhoto) MarshalJSON() ([]byte, error) { - type alias InputMediaPhoto - a := struct { - Type string `json:"type"` - alias - }{ - Type: "photo", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// InputMediaPhoto.inputMedia is a dummy method to avoid interface implementation. -func (v InputMediaPhoto) inputMedia() {} - -// InputMediaVideo Represents a video to be sent. -// https://core.telegram.org/bots/api#inputmediavideo -type InputMediaVideo struct { - // File to send.
Pass a file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP URL for Telegram to get a file from the Internet, or pass "attach://<file_attach_name>" to upload a new one using multipart/form-data under <file_attach_name> name. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Media InputFile `json:"media"` - // Optional. Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More information on Sending Files: https://core.telegram.org/bots/api#sending-files - Thumb *InputFile `json:"thumb,omitempty"` - // Optional. Caption of the video to be sent, 0-1024 characters after entities parsing - Caption string `json:"caption,omitempty"` - // Optional. Mode for parsing entities in the video caption. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in the caption, which can be specified instead of parse_mode - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Video width - Width int64 `json:"width,omitempty"` - // Optional. Video height - Height int64 `json:"height,omitempty"` - // Optional. Video duration in seconds - Duration int64 `json:"duration,omitempty"` - // Optional. Pass True if the uploaded video is suitable for streaming - SupportsStreaming bool `json:"supports_streaming,omitempty"` -} - -func (v InputMediaVideo) InputMediaParams(mediaName string, data map[string]NamedReader) ([]byte, error) { - if v.Media != nil { - switch m := v.Media.(type) { - case string: - // ok, noop - - case NamedReader: - v.Media = "attach://" + mediaName - data[mediaName] = m - - case io.Reader: - v.Media = "attach://" + mediaName - data[mediaName] = NamedFile{File: m} - - default: - return nil, fmt.Errorf("unknown type for InputMedia: %T", v.Media) - } - } - - return json.Marshal(v) -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v InputMediaVideo) GetType() string { - return "video" -} - -// GetMedia is a helper method to easily access the common fields of an interface. -func (v InputMediaVideo) GetMedia() InputFile { - return v.Media -} - -// MergeInputMedia returns a MergedInputMedia struct to simplify working with types in a non-generic world. -func (v InputMediaVideo) MergeInputMedia() MergedInputMedia { - return MergedInputMedia{ - Type: "video", - Media: v.Media, - Thumb: v.Thumb, - Caption: v.Caption, - ParseMode: v.ParseMode, - CaptionEntities: v.CaptionEntities, - Width: v.Width, - Height: v.Height, - Duration: v.Duration, - SupportsStreaming: v.SupportsStreaming, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v InputMediaVideo) MarshalJSON() ([]byte, error) { - type alias InputMediaVideo - a := struct { - Type string `json:"type"` - alias - }{ - Type: "video", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// InputMediaVideo.inputMedia is a dummy method to avoid interface implementation. -func (v InputMediaVideo) inputMedia() {} -
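The Merge* helpers compensate for the lack of union types in pre-generics Go: code holding a heterogeneous []InputMedia can flatten each element to MergedInputMedia and read the shared fields without a type switch. An illustrative sketch (the file_ids are placeholders):

package main

import (
	"fmt"

	"github.com/PaulSonOfLars/gotgbot/v2"
)

func main() {
	album := []gotgbot.InputMedia{
		gotgbot.InputMediaPhoto{Media: "photo-file-id", Caption: "first"}, // placeholder file_id
		gotgbot.InputMediaVideo{Media: "video-file-id", SupportsStreaming: true},
	}
	for _, m := range album {
		merged := m.MergeInputMedia()
		// Fields absent on the concrete type are simply zero-valued.
		fmt.Printf("%s: caption=%q duration=%d\n", merged.Type, merged.Caption, merged.Duration)
	}
}

-// InputMessageContent This object represents the content of a message to be sent as a result of an inline query.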
Telegram clients currently support the following 5 types: -// - InputTextMessageContent -// - InputLocationMessageContent -// - InputVenueMessageContent -// - InputContactMessageContent -// - InputInvoiceMessageContent -// https://core.telegram.org/bots/api#inputmessagecontent -type InputMessageContent interface { - inputMessageContent() -} - -// InputTextMessageContent Represents the content of a text message to be sent as the result of an inline query. -// https://core.telegram.org/bots/api#inputtextmessagecontent -type InputTextMessageContent struct { - // Text of the message to be sent, 1-4096 characters - MessageText string `json:"message_text"` - // Optional. Mode for parsing entities in the message text. See formatting options for more details. - ParseMode string `json:"parse_mode,omitempty"` - // Optional. List of special entities that appear in message text, which can be specified instead of parse_mode - Entities []MessageEntity `json:"entities,omitempty"` - // Optional. Disables link previews for links in the sent message - DisableWebPagePreview bool `json:"disable_web_page_preview,omitempty"` -} - -// InputTextMessageContent.inputMessageContent is a dummy method to avoid interface implementation. -func (v InputTextMessageContent) inputMessageContent() {} - -// InputVenueMessageContent Represents the content of a venue message to be sent as the result of an inline query. -// https://core.telegram.org/bots/api#inputvenuemessagecontent -type InputVenueMessageContent struct { - // Latitude of the venue in degrees - Latitude float64 `json:"latitude"` - // Longitude of the venue in degrees - Longitude float64 `json:"longitude"` - // Name of the venue - Title string `json:"title"` - // Address of the venue - Address string `json:"address"` - // Optional. Foursquare identifier of the venue, if known - FoursquareId string `json:"foursquare_id,omitempty"` - // Optional. Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) - FoursquareType string `json:"foursquare_type,omitempty"` - // Optional. Google Places identifier of the venue - GooglePlaceId string `json:"google_place_id,omitempty"` - // Optional. Google Places type of the venue. (See supported types.) - GooglePlaceType string `json:"google_place_type,omitempty"` -} - -// InputVenueMessageContent.inputMessageContent is a dummy method to avoid interface implementation. -func (v InputVenueMessageContent) inputMessageContent() {} - -// Invoice This object contains basic information about an invoice. -// https://core.telegram.org/bots/api#invoice -type Invoice struct { - // Product name - Title string `json:"title"` - // Product description - Description string `json:"description"` - // Unique bot deep-linking parameter that can be used to generate this invoice - StartParameter string `json:"start_parameter"` - // Three-letter ISO 4217 currency code - Currency string `json:"currency"` - // Total price in the smallest units of the currency (integer, not float/double). For example, for a price of US$ 1.45 pass amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). - TotalAmount int64 `json:"total_amount"` -} - -// KeyboardButton This object represents one button of the reply keyboard. For simple text buttons String can be used instead of this object to specify text of the button. 
Optional fields web_app, request_contact, request_location, and request_poll are mutually exclusive. -// Note: request_contact and request_location options will only work in Telegram versions released after 9 April, 2016. Older clients will display an unsupported message. -// Note: request_poll option will only work in Telegram versions released after 23 January, 2020. Older clients will display an unsupported message. -// Note: web_app option will only work in Telegram versions released after 16 April, 2022. Older clients will display an unsupported message. -// https://core.telegram.org/bots/api#keyboardbutton -type KeyboardButton struct { - // Text of the button. If none of the optional fields are used, it will be sent as a message when the button is pressed - Text string `json:"text"` - // Optional. If True, the user's phone number will be sent as a contact when the button is pressed. Available in private chats only. - RequestContact bool `json:"request_contact,omitempty"` - // Optional. If True, the user's current location will be sent when the button is pressed. Available in private chats only. - RequestLocation bool `json:"request_location,omitempty"` - // Optional. If specified, the user will be asked to create a poll and send it to the bot when the button is pressed. Available in private chats only. - RequestPoll *KeyboardButtonPollType `json:"request_poll,omitempty"` - // Optional. If specified, the described Web App will be launched when the button is pressed. The Web App will be able to send a "web_app_data" service message. Available in private chats only. - WebApp *WebAppInfo `json:"web_app,omitempty"` -} - -// KeyboardButtonPollType This object represents the type of a poll, which is allowed to be created and sent when the corresponding button is pressed. -// https://core.telegram.org/bots/api#keyboardbuttonpolltype -type KeyboardButtonPollType struct { - // Optional. If quiz is passed, the user will be allowed to create only polls in the quiz mode. If regular is passed, only regular polls will be allowed. Otherwise, the user will be allowed to create a poll of any type. - Type string `json:"type,omitempty"` -} - -// LabeledPrice This object represents a portion of the price for goods or services. -// https://core.telegram.org/bots/api#labeledprice -type LabeledPrice struct { - // Portion label - Label string `json:"label"` - // Price of the product in the smallest units of the currency (integer, not float/double). For example, for a price of US$ 1.45 pass amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). - Amount int64 `json:"amount"` -} - -// Location This object represents a point on the map. -// https://core.telegram.org/bots/api#location -type Location struct { - // Longitude as defined by sender - Longitude float64 `json:"longitude"` - // Latitude as defined by sender - Latitude float64 `json:"latitude"` - // Optional. The radius of uncertainty for the location, measured in meters; 0-1500 - HorizontalAccuracy float64 `json:"horizontal_accuracy,omitempty"` - // Optional. Time relative to the message sending date, during which the location can be updated; in seconds. For active live locations only. - LivePeriod int64 `json:"live_period,omitempty"` - // Optional. The direction in which the user is moving, in degrees; 1-360. For active live locations only. - Heading int64 `json:"heading,omitempty"` - // Optional.
The maximum distance for proximity alerts about approaching another chat member, in meters. For sent live locations only. - ProximityAlertRadius int64 `json:"proximity_alert_radius,omitempty"` -} - -// LoginUrl This object represents a parameter of the inline keyboard button used to automatically authorize a user. Serves as a great replacement for the Telegram Login Widget when the user is coming from Telegram. All the user needs to do is tap/click a button and confirm that they want to log in. -// Telegram apps support these buttons as of version 5.7. -// https://core.telegram.org/bots/api#loginurl -type LoginUrl struct { - // An HTTPS URL to be opened with user authorization data added to the query string when the button is pressed. If the user refuses to provide authorization data, the original URL without information about the user will be opened. The data added is the same as described in Receiving authorization data. NOTE: You must always check the hash of the received data to verify the authentication and the integrity of the data as described in Checking authorization. - Url string `json:"url"` - // Optional. New text of the button in forwarded messages. - ForwardText string `json:"forward_text,omitempty"` - // Optional. Username of a bot, which will be used for user authorization. See Setting up a bot for more details. If not specified, the current bot's username will be assumed. The url's domain must be the same as the domain linked with the bot. See Linking your domain to the bot for more details. - BotUsername string `json:"bot_username,omitempty"` - // Optional. Pass True to request the permission for your bot to send messages to the user. - RequestWriteAccess bool `json:"request_write_access,omitempty"` -} - -// MaskPosition This object describes the position on faces where a mask should be placed by default. -// https://core.telegram.org/bots/api#maskposition -type MaskPosition struct { - // The part of the face relative to which the mask should be placed. One of "forehead", "eyes", "mouth", or "chin". - Point string `json:"point"` - // Shift by X-axis measured in widths of the mask scaled to the face size, from left to right. For example, choosing -1.0 will place the mask just to the left of the default mask position. - XShift float64 `json:"x_shift"` - // Shift by Y-axis measured in heights of the mask scaled to the face size, from top to bottom. For example, 1.0 will place the mask just below the default mask position. - YShift float64 `json:"y_shift"` - // Mask scaling coefficient. For example, 2.0 means double size. - Scale float64 `json:"scale"` -} - -// MenuButton This object describes the bot's menu button in a private chat. It should be one of -// - MenuButtonCommands -// - MenuButtonWebApp -// - MenuButtonDefault -// If a menu button other than MenuButtonDefault is set for a private chat, then it is applied in the chat. Otherwise the default menu button is applied. By default, the menu button opens the list of bot commands. -// https://core.telegram.org/bots/api#menubutton -type MenuButton interface { - GetType() string - menuButton() - // MergeMenuButton returns a MergedMenuButton struct to simplify working with complex telegram types in a non-generic world. - MergeMenuButton() MergedMenuButton -} - -// MergedMenuButton is a helper type to simplify interactions with the various MenuButton subtypes. -type MergedMenuButton struct { - // Type of the button, must be commands - Type string `json:"type"` - // Optional.
Text on the button (Only for web_app) - Text string `json:"text,omitempty"` - // Optional. Description of the Web App that will be launched when the user presses the button. The Web App will be able to send an arbitrary message on behalf of the user using the method answerWebAppQuery. (Only for web_app) - WebApp *WebAppInfo `json:"web_app,omitempty"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MergedMenuButton) GetType() string { - return v.Type -} - -// MergedMenuButton.menuButton is a dummy method to avoid interface implementation. -func (v MergedMenuButton) menuButton() {} - -// MergeMenuButton returns a MergedMenuButton struct to simplify working with types in a non-generic world. -func (v MergedMenuButton) MergeMenuButton() MergedMenuButton { - return v -} - -// unmarshalMenuButtonArray is a JSON unmarshalling helper which allows unmarshalling an array of interfaces -// using unmarshalMenuButton. -func unmarshalMenuButtonArray(d json.RawMessage) ([]MenuButton, error) { - var ds []json.RawMessage - err := json.Unmarshal(d, &ds) - if err != nil { - return nil, err - } - - var vs []MenuButton - for _, d := range ds { - v, err := unmarshalMenuButton(d) - if err != nil { - return nil, err - } - vs = append(vs, v) - } - - return vs, nil -} - -// unmarshalMenuButton is a JSON unmarshalling helper to unmarshal the right structs into a MenuButton interface -// based on the Type field. -func unmarshalMenuButton(d json.RawMessage) (MenuButton, error) { - if len(d) == 0 { - return nil, nil - } - - t := struct { - Type string - }{} - err := json.Unmarshal(d, &t) - if err != nil { - return nil, err - } - - switch t.Type { - case "commands": - s := MenuButtonCommands{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "web_app": - s := MenuButtonWebApp{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - case "default": - s := MenuButtonDefault{} - err := json.Unmarshal(d, &s) - if err != nil { - return nil, err - } - return s, nil - - } - return nil, fmt.Errorf("unknown interface with Type %v", t.Type) -} - -// MenuButtonCommands Represents a menu button, which opens the bot's list of commands. -// https://core.telegram.org/bots/api#menubuttoncommands -type MenuButtonCommands struct{} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MenuButtonCommands) GetType() string { - return "commands" -} - -// MergeMenuButton returns a MergedMenuButton struct to simplify working with types in a non-generic world. -func (v MenuButtonCommands) MergeMenuButton() MergedMenuButton { - return MergedMenuButton{ - Type: "commands", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v MenuButtonCommands) MarshalJSON() ([]byte, error) { - type alias MenuButtonCommands - a := struct { - Type string `json:"type"` - alias - }{ - Type: "commands", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// MenuButtonCommands.menuButton is a dummy method to avoid interface implementation. -func (v MenuButtonCommands) menuButton() {} - -// MenuButtonDefault Describes that no specific value for the menu button was set. -// https://core.telegram.org/bots/api#menubuttondefault -type MenuButtonDefault struct{} -
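unmarshalMenuButton above is the decode-side counterpart of the MarshalJSON overrides: the wire format carries a "type" discriminator, and the switch restores the matching concrete struct. The helper is unexported, but the discriminator it switches on is easy to see by marshalling one of the subtypes (the values below are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/PaulSonOfLars/gotgbot/v2"
)

func main() {
	b, _ := json.Marshal(gotgbot.MenuButtonWebApp{
		Text:   "Open shop", // illustrative button text
		WebApp: gotgbot.WebAppInfo{Url: "https://example.com/shop"},
	})
	// unmarshalMenuButton would route this payload back to a MenuButtonWebApp.
	fmt.Println(string(b)) // {"type":"web_app","text":"Open shop","web_app":{"url":"https://example.com/shop"}}
}

-// GetType is a helper method to easily access the common fields of an interface.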
-func (v MenuButtonDefault) GetType() string { - return "default" -} - -// MergeMenuButton returns a MergedMenuButton struct to simplify working with types in a non-generic world. -func (v MenuButtonDefault) MergeMenuButton() MergedMenuButton { - return MergedMenuButton{ - Type: "default", - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v MenuButtonDefault) MarshalJSON() ([]byte, error) { - type alias MenuButtonDefault - a := struct { - Type string `json:"type"` - alias - }{ - Type: "default", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// MenuButtonDefault.menuButton is a dummy method to avoid interface implementation. -func (v MenuButtonDefault) menuButton() {} - -// MenuButtonWebApp Represents a menu button, which launches a Web App. -// https://core.telegram.org/bots/api#menubuttonwebapp -type MenuButtonWebApp struct { - // Text on the button - Text string `json:"text"` - // Description of the Web App that will be launched when the user presses the button. The Web App will be able to send an arbitrary message on behalf of the user using the method answerWebAppQuery. - WebApp WebAppInfo `json:"web_app"` -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MenuButtonWebApp) GetType() string { - return "web_app" -} - -// MergeMenuButton returns a MergedMenuButton struct to simplify working with types in a non-generic world. -func (v MenuButtonWebApp) MergeMenuButton() MergedMenuButton { - return MergedMenuButton{ - Type: "web_app", - Text: v.Text, - WebApp: &v.WebApp, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Type value. -func (v MenuButtonWebApp) MarshalJSON() ([]byte, error) { - type alias MenuButtonWebApp - a := struct { - Type string `json:"type"` - alias - }{ - Type: "web_app", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// MenuButtonWebApp.menuButton is a dummy method to avoid interface implementation. -func (v MenuButtonWebApp) menuButton() {} - -// Message This object represents a message. -// https://core.telegram.org/bots/api#message -type Message struct { - // Unique message identifier inside this chat - MessageId int64 `json:"message_id"` - // Optional. Unique identifier of a message thread to which the message belongs; for supergroups only - MessageThreadId int64 `json:"message_thread_id,omitempty"` - // Optional. Sender of the message; empty for messages sent to channels. For backward compatibility, the field contains a fake sender user in non-channel chats, if the message was sent on behalf of a chat. - From *User `json:"from,omitempty"` - // Optional. Sender of the message, sent on behalf of a chat. For example, the channel itself for channel posts, the supergroup itself for messages from anonymous group administrators, the linked channel for messages automatically forwarded to the discussion group. For backward compatibility, the field from contains a fake sender user in non-channel chats, if the message was sent on behalf of a chat. - SenderChat *Chat `json:"sender_chat,omitempty"` - // Date the message was sent in Unix time - Date int64 `json:"date"` - // Conversation the message belongs to - Chat Chat `json:"chat"` - // Optional. For forwarded messages, sender of the original message - ForwardFrom *User `json:"forward_from,omitempty"` - // Optional. 
For messages forwarded from channels or from anonymous administrators, information about the original sender chat - ForwardFromChat *Chat `json:"forward_from_chat,omitempty"` - // Optional. For messages forwarded from channels, identifier of the original message in the channel - ForwardFromMessageId int64 `json:"forward_from_message_id,omitempty"` - // Optional. For forwarded messages that were originally sent in channels or by an anonymous chat administrator, signature of the message sender if present - ForwardSignature string `json:"forward_signature,omitempty"` - // Optional. Sender's name for messages forwarded from users who disallow adding a link to their account in forwarded messages - ForwardSenderName string `json:"forward_sender_name,omitempty"` - // Optional. For forwarded messages, date the original message was sent in Unix time - ForwardDate int64 `json:"forward_date,omitempty"` - // Optional. True, if the message is sent to a forum topic - IsTopicMessage bool `json:"is_topic_message,omitempty"` - // Optional. True, if the message is a channel post that was automatically forwarded to the connected discussion group - IsAutomaticForward bool `json:"is_automatic_forward,omitempty"` - // Optional. For replies, the original message. Note that the Message object in this field will not contain further reply_to_message fields even if it itself is a reply. - ReplyToMessage *Message `json:"reply_to_message,omitempty"` - // Optional. Bot through which the message was sent - ViaBot *User `json:"via_bot,omitempty"` - // Optional. Date the message was last edited in Unix time - EditDate int64 `json:"edit_date,omitempty"` - // Optional. True, if the message can't be forwarded - HasProtectedContent bool `json:"has_protected_content,omitempty"` - // Optional. The unique identifier of a media message group this message belongs to - MediaGroupId string `json:"media_group_id,omitempty"` - // Optional. Signature of the post author for messages in channels, or the custom title of an anonymous group administrator - AuthorSignature string `json:"author_signature,omitempty"` - // Optional. For text messages, the actual UTF-8 text of the message - Text string `json:"text,omitempty"` - // Optional. For text messages, special entities like usernames, URLs, bot commands, etc. that appear in the text - Entities []MessageEntity `json:"entities,omitempty"` - // Optional. Message is an animation, information about the animation. For backward compatibility, when this field is set, the document field will also be set - Animation *Animation `json:"animation,omitempty"` - // Optional. Message is an audio file, information about the file - Audio *Audio `json:"audio,omitempty"` - // Optional. Message is a general file, information about the file - Document *Document `json:"document,omitempty"` - // Optional. Message is a photo, available sizes of the photo - Photo []PhotoSize `json:"photo,omitempty"` - // Optional. Message is a sticker, information about the sticker - Sticker *Sticker `json:"sticker,omitempty"` - // Optional. Message is a video, information about the video - Video *Video `json:"video,omitempty"` - // Optional. Message is a video note, information about the video message - VideoNote *VideoNote `json:"video_note,omitempty"` - // Optional. Message is a voice message, information about the file - Voice *Voice `json:"voice,omitempty"` - // Optional. Caption for the animation, audio, document, photo, video or voice - Caption string `json:"caption,omitempty"` - // Optional. 
For messages with a caption, special entities like usernames, URLs, bot commands, etc. that appear in the caption - CaptionEntities []MessageEntity `json:"caption_entities,omitempty"` - // Optional. Message is a shared contact, information about the contact - Contact *Contact `json:"contact,omitempty"` - // Optional. Message is a dice with random value - Dice *Dice `json:"dice,omitempty"` - // Optional. Message is a game, information about the game. More about games: https://core.telegram.org/bots/api#games - Game *Game `json:"game,omitempty"` - // Optional. Message is a native poll, information about the poll - Poll *Poll `json:"poll,omitempty"` - // Optional. Message is a venue, information about the venue. For backward compatibility, when this field is set, the location field will also be set - Venue *Venue `json:"venue,omitempty"` - // Optional. Message is a shared location, information about the location - Location *Location `json:"location,omitempty"` - // Optional. New members that were added to the group or supergroup and information about them (the bot itself may be one of these members) - NewChatMembers []User `json:"new_chat_members,omitempty"` - // Optional. A member was removed from the group, information about them (this member may be the bot itself) - LeftChatMember *User `json:"left_chat_member,omitempty"` - // Optional. A chat title was changed to this value - NewChatTitle string `json:"new_chat_title,omitempty"` - // Optional. A chat photo was changed to this value - NewChatPhoto []PhotoSize `json:"new_chat_photo,omitempty"` - // Optional. Service message: the chat photo was deleted - DeleteChatPhoto bool `json:"delete_chat_photo,omitempty"` - // Optional. Service message: the group has been created - GroupChatCreated bool `json:"group_chat_created,omitempty"` - // Optional. Service message: the supergroup has been created. This field can't be received in a message coming through updates, because a bot can't be a member of a supergroup when it is created. It can only be found in reply_to_message if someone replies to the very first message in a directly created supergroup. - SupergroupChatCreated bool `json:"supergroup_chat_created,omitempty"` - // Optional. Service message: the channel has been created. This field can't be received in a message coming through updates, because a bot can't be a member of a channel when it is created. It can only be found in reply_to_message if someone replies to the very first message in a channel. - ChannelChatCreated bool `json:"channel_chat_created,omitempty"` - // Optional. Service message: auto-delete timer settings changed in the chat - MessageAutoDeleteTimerChanged *MessageAutoDeleteTimerChanged `json:"message_auto_delete_timer_changed,omitempty"` - // Optional. The group has been migrated to a supergroup with the specified identifier. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type is safe for storing this identifier. - MigrateToChatId int64 `json:"migrate_to_chat_id,omitempty"` - // Optional. The supergroup has been migrated from a group with the specified identifier. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type is safe for storing this identifier.
- MigrateFromChatId int64 `json:"migrate_from_chat_id,omitempty"` - // Optional. Specified message was pinned. Note that the Message object in this field will not contain further reply_to_message fields even if it is itself a reply. - PinnedMessage *Message `json:"pinned_message,omitempty"` - // Optional. Message is an invoice for a payment, information about the invoice. More about payments: https://core.telegram.org/bots/api#payments - Invoice *Invoice `json:"invoice,omitempty"` - // Optional. Message is a service message about a successful payment, information about the payment. More about payments: https://core.telegram.org/bots/api#payments - SuccessfulPayment *SuccessfulPayment `json:"successful_payment,omitempty"` - // Optional. The domain name of the website on which the user has logged in. More about Telegram Login: https://core.telegram.org/widgets/login - ConnectedWebsite string `json:"connected_website,omitempty"` - // Optional. Telegram Passport data - PassportData *PassportData `json:"passport_data,omitempty"` - // Optional. Service message. A user in the chat triggered another user's proximity alert while sharing Live Location. - ProximityAlertTriggered *ProximityAlertTriggered `json:"proximity_alert_triggered,omitempty"` - // Optional. Service message: forum topic created - ForumTopicCreated *ForumTopicCreated `json:"forum_topic_created,omitempty"` - // Optional. Service message: forum topic closed - ForumTopicClosed *ForumTopicClosed `json:"forum_topic_closed,omitempty"` - // Optional. Service message: forum topic reopened - ForumTopicReopened *ForumTopicReopened `json:"forum_topic_reopened,omitempty"` - // Optional. Service message: video chat scheduled - VideoChatScheduled *VideoChatScheduled `json:"video_chat_scheduled,omitempty"` - // Optional. Service message: video chat started - VideoChatStarted *VideoChatStarted `json:"video_chat_started,omitempty"` - // Optional. Service message: video chat ended - VideoChatEnded *VideoChatEnded `json:"video_chat_ended,omitempty"` - // Optional. Service message: new participants invited to a video chat - VideoChatParticipantsInvited *VideoChatParticipantsInvited `json:"video_chat_participants_invited,omitempty"` - // Optional. Service message: data sent by a Web App - WebAppData *WebAppData `json:"web_app_data,omitempty"` - // Optional. Inline keyboard attached to the message. login_url buttons are represented as ordinary url buttons. - ReplyMarkup *InlineKeyboardMarkup `json:"reply_markup,omitempty"` -} - -// MessageAutoDeleteTimerChanged This object represents a service message about a change in auto-delete timer settings. -// https://core.telegram.org/bots/api#messageautodeletetimerchanged -type MessageAutoDeleteTimerChanged struct { - // New auto-delete time for messages in the chat; in seconds - MessageAutoDeleteTime int64 `json:"message_auto_delete_time"` -} - -// MessageEntity This object represents one special entity in a text message. For example, hashtags, usernames, URLs, etc. -// https://core.telegram.org/bots/api#messageentity -type MessageEntity struct { - // Type of the entity. 
Currently, can be "mention" (@username), "hashtag" (#hashtag), "cashtag" ($USD), "bot_command" (/start@jobs_bot), "url" (https://telegram.org), "email" (do-not-reply@telegram.org), "phone_number" (+1-212-555-0123), "bold" (bold text), "italic" (italic text), "underline" (underlined text), "strikethrough" (strikethrough text), "spoiler" (spoiler message), "code" (monowidth string), "pre" (monowidth block), "text_link" (for clickable text URLs), "text_mention" (for users without usernames), "custom_emoji" (for inline custom emoji stickers) - Type string `json:"type"` - // Offset in UTF-16 code units to the start of the entity - Offset int64 `json:"offset"` - // Length of the entity in UTF-16 code units - Length int64 `json:"length"` - // Optional. For "text_link" only, URL that will be opened after user taps on the text - Url string `json:"url,omitempty"` - // Optional. For "text_mention" only, the mentioned user - User *User `json:"user,omitempty"` - // Optional. For "pre" only, the programming language of the entity text - Language string `json:"language,omitempty"` - // Optional. For "custom_emoji" only, unique identifier of the custom emoji. Use getCustomEmojiStickers to get full information about the sticker - CustomEmojiId string `json:"custom_emoji_id,omitempty"` -} - -// MessageId This object represents a unique message identifier. -// https://core.telegram.org/bots/api#messageid -type MessageId struct { - // Unique message identifier - MessageId int64 `json:"message_id"` -} - -// OrderInfo This object represents information about an order. -// https://core.telegram.org/bots/api#orderinfo -type OrderInfo struct { - // Optional. User name - Name string `json:"name,omitempty"` - // Optional. User's phone number - PhoneNumber string `json:"phone_number,omitempty"` - // Optional. User email - Email string `json:"email,omitempty"` - // Optional. User shipping address - ShippingAddress *ShippingAddress `json:"shipping_address,omitempty"` -} - -// PassportData Describes Telegram Passport data shared with the bot by the user. -// https://core.telegram.org/bots/api#passportdata -type PassportData struct { - // Array with information about documents and other Telegram Passport elements that was shared with the bot - Data []EncryptedPassportElement `json:"data,omitempty"` - // Encrypted credentials required to decrypt the data - Credentials EncryptedCredentials `json:"credentials"` -} - -// PassportElementError This object represents an error in the Telegram Passport element which was submitted that should be resolved by the user. It should be one of: -// - PassportElementErrorDataField -// - PassportElementErrorFrontSide -// - PassportElementErrorReverseSide -// - PassportElementErrorSelfie -// - PassportElementErrorFile -// - PassportElementErrorFiles -// - PassportElementErrorTranslationFile -// - PassportElementErrorTranslationFiles -// - PassportElementErrorUnspecified -// https://core.telegram.org/bots/api#passportelementerror -type PassportElementError interface { - GetSource() string - GetType() string - GetMessage() string - passportElementError() - // MergePassportElementError returns a MergedPassportElementError struct to simplify working with complex telegram types in a non-generic world. - MergePassportElementError() MergedPassportElementError -} - -// MergedPassportElementError is a helper type to simplify interactions with the various PassportElementError subtypes. 
-type MergedPassportElementError struct { - // Error source, must be data - Source string `json:"source"` - // The section of the user's Telegram Passport which has the error, one of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address" - Type string `json:"type"` - // Optional. Name of the data field which has the error (Only for data) - FieldName string `json:"field_name,omitempty"` - // Optional. Base64-encoded data hash (Only for data) - DataHash string `json:"data_hash,omitempty"` - // Error message - Message string `json:"message"` - // Optional. Base64-encoded hash of the file with the front side of the document (Only for front_side, reverse_side, selfie, file, translation_file) - FileHash string `json:"file_hash,omitempty"` - // Optional. List of base64-encoded file hashes (Only for files, translation_files) - FileHashes []string `json:"file_hashes,omitempty"` - // Optional. Base64-encoded element hash (Only for unspecified) - ElementHash string `json:"element_hash,omitempty"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v MergedPassportElementError) GetSource() string { - return v.Source -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v MergedPassportElementError) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v MergedPassportElementError) GetMessage() string { - return v.Message -} - -// MergedPassportElementError.passportElementError is a dummy method to avoid interface implementation. -func (v MergedPassportElementError) passportElementError() {} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v MergedPassportElementError) MergePassportElementError() MergedPassportElementError { - return v -} - -// PassportElementErrorDataField Represents an issue in one of the data fields that was provided by the user. The error is considered resolved when the field's value changes. -// https://core.telegram.org/bots/api#passportelementerrordatafield -type PassportElementErrorDataField struct { - // The section of the user's Telegram Passport which has the error, one of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address" - Type string `json:"type"` - // Name of the data field which has the error - FieldName string `json:"field_name"` - // Base64-encoded data hash - DataHash string `json:"data_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorDataField) GetSource() string { - return "data" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorDataField) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorDataField) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. 
-func (v PassportElementErrorDataField) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "data", - Type: v.Type, - FieldName: v.FieldName, - DataHash: v.DataHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorDataField) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorDataField - a := struct { - Source string `json:"source"` - alias - }{ - Source: "data", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorDataField.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorDataField) passportElementError() {} - -// PassportElementErrorFile Represents an issue with a document scan. The error is considered resolved when the file with the document scan changes. -// https://core.telegram.org/bots/api#passportelementerrorfile -type PassportElementErrorFile struct { - // The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration" - Type string `json:"type"` - // Base64-encoded file hash - FileHash string `json:"file_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFile) GetSource() string { - return "file" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFile) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFile) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorFile) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "file", - Type: v.Type, - FileHash: v.FileHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorFile) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorFile - a := struct { - Source string `json:"source"` - alias - }{ - Source: "file", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorFile.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorFile) passportElementError() {} - -// PassportElementErrorFiles Represents an issue with a list of scans. The error is considered resolved when the list of files containing the scans changes. -// https://core.telegram.org/bots/api#passportelementerrorfiles -type PassportElementErrorFiles struct { - // The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration" - Type string `json:"type"` - // List of base64-encoded file hashes - FileHashes []string `json:"file_hashes,omitempty"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFiles) GetSource() string { - return "files" -} - -// GetType is a helper method to easily access the common fields of an interface. 
-func (v PassportElementErrorFiles) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFiles) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorFiles) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "files", - Type: v.Type, - FileHashes: v.FileHashes, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorFiles) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorFiles - a := struct { - Source string `json:"source"` - alias - }{ - Source: "files", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorFiles.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorFiles) passportElementError() {} - -// PassportElementErrorFrontSide Represents an issue with the front side of a document. The error is considered resolved when the file with the front side of the document changes. -// https://core.telegram.org/bots/api#passportelementerrorfrontside -type PassportElementErrorFrontSide struct { - // The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport" - Type string `json:"type"` - // Base64-encoded hash of the file with the front side of the document - FileHash string `json:"file_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFrontSide) GetSource() string { - return "front_side" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFrontSide) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorFrontSide) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorFrontSide) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "front_side", - Type: v.Type, - FileHash: v.FileHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorFrontSide) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorFrontSide - a := struct { - Source string `json:"source"` - alias - }{ - Source: "front_side", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorFrontSide.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorFrontSide) passportElementError() {} - -// PassportElementErrorReverseSide Represents an issue with the reverse side of a document. The error is considered resolved when the file with reverse side of the document changes. 
-// https://core.telegram.org/bots/api#passportelementerrorreverseside -type PassportElementErrorReverseSide struct { - // The section of the user's Telegram Passport which has the issue, one of "driver_license", "identity_card" - Type string `json:"type"` - // Base64-encoded hash of the file with the reverse side of the document - FileHash string `json:"file_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorReverseSide) GetSource() string { - return "reverse_side" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorReverseSide) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorReverseSide) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorReverseSide) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "reverse_side", - Type: v.Type, - FileHash: v.FileHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorReverseSide) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorReverseSide - a := struct { - Source string `json:"source"` - alias - }{ - Source: "reverse_side", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorReverseSide.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorReverseSide) passportElementError() {} - -// PassportElementErrorSelfie Represents an issue with the selfie with a document. The error is considered resolved when the file with the selfie changes. -// https://core.telegram.org/bots/api#passportelementerrorselfie -type PassportElementErrorSelfie struct { - // The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport" - Type string `json:"type"` - // Base64-encoded hash of the file with the selfie - FileHash string `json:"file_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorSelfie) GetSource() string { - return "selfie" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorSelfie) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorSelfie) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorSelfie) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "selfie", - Type: v.Type, - FileHash: v.FileHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. 
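Each of these MarshalJSON implementations uses the same trick: a local type alias drops the method set, so embedding the alias in an anonymous struct injects the constant source discriminator without json.Marshal recursing back into MarshalJSON. A standalone sketch of the pattern with a hypothetical type:

package main

import (
	"encoding/json"
	"fmt"
)

type Dog struct {
	Name string `json:"name"`
}

// MarshalJSON injects a constant "species" field. The alias type has no
// methods, so the inner json.Marshal call cannot recurse.
func (d Dog) MarshalJSON() ([]byte, error) {
	type alias Dog
	return json.Marshal(struct {
		Species string `json:"species"`
		alias
	}{
		Species: "dog",
		alias:   (alias)(d),
	})
}

func main() {
	b, _ := json.Marshal(Dog{Name: "Rex"})
	fmt.Println(string(b)) // {"species":"dog","name":"Rex"}
}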
-func (v PassportElementErrorSelfie) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorSelfie - a := struct { - Source string `json:"source"` - alias - }{ - Source: "selfie", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorSelfie.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorSelfie) passportElementError() {} - -// PassportElementErrorTranslationFile Represents an issue with one of the files that constitute the translation of a document. The error is considered resolved when the file changes. -// https://core.telegram.org/bots/api#passportelementerrortranslationfile -type PassportElementErrorTranslationFile struct { - // Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration" - Type string `json:"type"` - // Base64-encoded file hash - FileHash string `json:"file_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorTranslationFile) GetSource() string { - return "translation_file" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorTranslationFile) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorTranslationFile) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorTranslationFile) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "translation_file", - Type: v.Type, - FileHash: v.FileHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorTranslationFile) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorTranslationFile - a := struct { - Source string `json:"source"` - alias - }{ - Source: "translation_file", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorTranslationFile.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorTranslationFile) passportElementError() {} - -// PassportElementErrorTranslationFiles Represents an issue with the translated version of a document. The error is considered resolved when a file with the document translation change. -// https://core.telegram.org/bots/api#passportelementerrortranslationfiles -type PassportElementErrorTranslationFiles struct { - // Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration" - Type string `json:"type"` - // List of base64-encoded file hashes - FileHashes []string `json:"file_hashes,omitempty"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. 
-func (v PassportElementErrorTranslationFiles) GetSource() string { - return "translation_files" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorTranslationFiles) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorTranslationFiles) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorTranslationFiles) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "translation_files", - Type: v.Type, - FileHashes: v.FileHashes, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorTranslationFiles) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorTranslationFiles - a := struct { - Source string `json:"source"` - alias - }{ - Source: "translation_files", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorTranslationFiles.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorTranslationFiles) passportElementError() {} - -// PassportElementErrorUnspecified Represents an issue in an unspecified place. The error is considered resolved when new data is added. -// https://core.telegram.org/bots/api#passportelementerrorunspecified -type PassportElementErrorUnspecified struct { - // Type of element of the user's Telegram Passport which has the issue - Type string `json:"type"` - // Base64-encoded element hash - ElementHash string `json:"element_hash"` - // Error message - Message string `json:"message"` -} - -// GetSource is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorUnspecified) GetSource() string { - return "unspecified" -} - -// GetType is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorUnspecified) GetType() string { - return v.Type -} - -// GetMessage is a helper method to easily access the common fields of an interface. -func (v PassportElementErrorUnspecified) GetMessage() string { - return v.Message -} - -// MergePassportElementError returns a MergedPassportElementError struct to simplify working with types in a non-generic world. -func (v PassportElementErrorUnspecified) MergePassportElementError() MergedPassportElementError { - return MergedPassportElementError{ - Source: "unspecified", - Type: v.Type, - ElementHash: v.ElementHash, - Message: v.Message, - } -} - -// MarshalJSON is a custom JSON marshaller to allow for enforcing the Source value. -func (v PassportElementErrorUnspecified) MarshalJSON() ([]byte, error) { - type alias PassportElementErrorUnspecified - a := struct { - Source string `json:"source"` - alias - }{ - Source: "unspecified", - alias: (alias)(v), - } - return json.Marshal(a) -} - -// PassportElementErrorUnspecified.passportElementError is a dummy method to avoid interface implementation. -func (v PassportElementErrorUnspecified) passportElementError() {} - -// PassportFile This object represents a file uploaded to Telegram Passport. Currently all Telegram Passport files are in JPEG format when decrypted and don't exceed 10MB. 
-// https://core.telegram.org/bots/api#passportfile -type PassportFile struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // File size in bytes - FileSize int64 `json:"file_size"` - // Unix time when the file was uploaded - FileDate int64 `json:"file_date"` -} - -// PhotoSize This object represents one size of a photo or a file / sticker thumbnail. -// https://core.telegram.org/bots/api#photosize -type PhotoSize struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Photo width - Width int64 `json:"width"` - // Photo height - Height int64 `json:"height"` - // Optional. File size in bytes - FileSize int64 `json:"file_size,omitempty"` -} - -// Poll This object contains information about a poll. -// https://core.telegram.org/bots/api#poll -type Poll struct { - // Unique poll identifier - Id string `json:"id"` - // Poll question, 1-300 characters - Question string `json:"question"` - // List of poll options - Options []PollOption `json:"options,omitempty"` - // Total number of users that voted in the poll - TotalVoterCount int64 `json:"total_voter_count"` - // True, if the poll is closed - IsClosed bool `json:"is_closed"` - // True, if the poll is anonymous - IsAnonymous bool `json:"is_anonymous"` - // Poll type, currently can be "regular" or "quiz" - Type string `json:"type"` - // True, if the poll allows multiple answers - AllowsMultipleAnswers bool `json:"allows_multiple_answers"` - // Optional. 0-based identifier of the correct answer option. Available only for polls in the quiz mode, which are closed, or was sent (not forwarded) by the bot or to the private chat with the bot. - CorrectOptionId int64 `json:"correct_option_id,omitempty"` - // Optional. Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters - Explanation string `json:"explanation,omitempty"` - // Optional. Special entities like usernames, URLs, bot commands, etc. that appear in the explanation - ExplanationEntities []MessageEntity `json:"explanation_entities,omitempty"` - // Optional. Amount of time in seconds the poll will be active after creation - OpenPeriod int64 `json:"open_period,omitempty"` - // Optional. Point in time (Unix timestamp) when the poll will be automatically closed - CloseDate int64 `json:"close_date,omitempty"` -} - -// PollAnswer This object represents an answer of a user in a non-anonymous poll. -// https://core.telegram.org/bots/api#pollanswer -type PollAnswer struct { - // Unique poll identifier - PollId string `json:"poll_id"` - // The user, who changed the answer to the poll - User User `json:"user"` - // 0-based identifiers of answer options, chosen by the user. May be empty if the user retracted their vote. - OptionIds []int64 `json:"option_ids,omitempty"` -} - -// PollOption This object contains information about one answer option in a poll. 
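PollAnswer carries only 0-based indices, so resolving what a voter actually picked means indexing back into the parent Poll's options; an empty OptionIds slice means the vote was retracted. A small illustrative helper (assumes the types above are in scope):

// chosenOptionTexts maps a PollAnswer's option indices to their text.
func chosenOptionTexts(p Poll, a PollAnswer) []string {
	var chosen []string
	for _, idx := range a.OptionIds {
		if idx >= 0 && idx < int64(len(p.Options)) {
			chosen = append(chosen, p.Options[idx].Text)
		}
	}
	return chosen
}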
-// https://core.telegram.org/bots/api#polloption -type PollOption struct { - // Option text, 1-100 characters - Text string `json:"text"` - // Number of users that voted for this option - VoterCount int64 `json:"voter_count"` -} - -// PreCheckoutQuery This object contains information about an incoming pre-checkout query. -// https://core.telegram.org/bots/api#precheckoutquery -type PreCheckoutQuery struct { - // Unique query identifier - Id string `json:"id"` - // User who sent the query - From User `json:"from"` - // Three-letter ISO 4217 currency code - Currency string `json:"currency"` - // Total price in the smallest units of the currency (integer, not float/double). For example, for a price of US$ 1.45 pass amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). - TotalAmount int64 `json:"total_amount"` - // Bot specified invoice payload - InvoicePayload string `json:"invoice_payload"` - // Optional. Identifier of the shipping option chosen by the user - ShippingOptionId string `json:"shipping_option_id,omitempty"` - // Optional. Order information provided by the user - OrderInfo *OrderInfo `json:"order_info,omitempty"` -} - -// ProximityAlertTriggered This object represents the content of a service message, sent whenever a user in the chat triggers a proximity alert set by another user. -// https://core.telegram.org/bots/api#proximityalerttriggered -type ProximityAlertTriggered struct { - // User that triggered the alert - Traveler User `json:"traveler"` - // User that set the alert - Watcher User `json:"watcher"` - // The distance between the users - Distance int64 `json:"distance"` -} - -// ReplyKeyboardMarkup This object represents a custom keyboard with reply options (see Introduction to bots for details and examples). -// https://core.telegram.org/bots/api#replykeyboardmarkup -type ReplyKeyboardMarkup struct { - // Array of button rows, each represented by an Array of KeyboardButton objects - Keyboard [][]KeyboardButton `json:"keyboard,omitempty"` - // Optional. Requests clients to resize the keyboard vertically for optimal fit (e.g., make the keyboard smaller if there are just two rows of buttons). Defaults to false, in which case the custom keyboard is always of the same height as the app's standard keyboard. - ResizeKeyboard bool `json:"resize_keyboard,omitempty"` - // Optional. Requests clients to hide the keyboard as soon as it's been used. The keyboard will still be available, but clients will automatically display the usual letter-keyboard in the chat - the user can press a special button in the input field to see the custom keyboard again. Defaults to false. - OneTimeKeyboard bool `json:"one_time_keyboard,omitempty"` - // Optional. The placeholder to be shown in the input field when the keyboard is active; 1-64 characters - InputFieldPlaceholder string `json:"input_field_placeholder,omitempty"` - // Optional. Use this parameter if you want to show the keyboard to specific users only. Targets: 1) users that are @mentioned in the text of the Message object; 2) if the bot's message is a reply (has reply_to_message_id), sender of the original message. Example: A user requests to change the bot's language, bot replies to the request with a keyboard to select the new language. Other users in the group don't see the keyboard. - Selective bool `json:"selective,omitempty"` -} - -// ReplyKeyboardMarkup.replyMarkup is a dummy method to avoid interface implementation. 
-func (v ReplyKeyboardMarkup) replyMarkup() {} - -// ReplyKeyboardRemove Upon receiving a message with this object, Telegram clients will remove the current custom keyboard and display the default letter-keyboard. By default, custom keyboards are displayed until a new keyboard is sent by a bot. An exception is made for one-time keyboards that are hidden immediately after the user presses a button (see ReplyKeyboardMarkup). -// https://core.telegram.org/bots/api#replykeyboardremove -type ReplyKeyboardRemove struct { - // Requests clients to remove the custom keyboard (user will not be able to summon this keyboard; if you want to hide the keyboard from sight but keep it accessible, use one_time_keyboard in ReplyKeyboardMarkup) - RemoveKeyboard bool `json:"remove_keyboard"` - // Optional. Use this parameter if you want to remove the keyboard for specific users only. Targets: 1) users that are @mentioned in the text of the Message object; 2) if the bot's message is a reply (has reply_to_message_id), sender of the original message. Example: A user votes in a poll, bot returns confirmation message in reply to the vote and removes the keyboard for that user, while still showing the keyboard with poll options to users who haven't voted yet. - Selective bool `json:"selective,omitempty"` -} - -// ReplyKeyboardRemove.replyMarkup is a dummy method to avoid interface implementation. -func (v ReplyKeyboardRemove) replyMarkup() {} - -// ResponseParameters Describes why a request was unsuccessful. -// https://core.telegram.org/bots/api#responseparameters -type ResponseParameters struct { - // Optional. The group has been migrated to a supergroup with the specified identifier. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this identifier. - MigrateToChatId int64 `json:"migrate_to_chat_id,omitempty"` - // Optional. In case of exceeding flood control, the number of seconds left to wait before the request can be repeated - RetryAfter int64 `json:"retry_after,omitempty"` -} - -// SentWebAppMessage Describes an inline message sent by a Web App on behalf of a user. -// https://core.telegram.org/bots/api#sentwebappmessage -type SentWebAppMessage struct { - // Optional. Identifier of the sent inline message. Available only if there is an inline keyboard attached to the message. - InlineMessageId string `json:"inline_message_id,omitempty"` -} - -// ShippingAddress This object represents a shipping address. -// https://core.telegram.org/bots/api#shippingaddress -type ShippingAddress struct { - // Two-letter ISO 3166-1 alpha-2 country code - CountryCode string `json:"country_code"` - // State, if applicable - State string `json:"state"` - // City - City string `json:"city"` - // First line for the address - StreetLine1 string `json:"street_line1"` - // Second line for the address - StreetLine2 string `json:"street_line2"` - // Address post code - PostCode string `json:"post_code"` -} - -// ShippingOption This object represents one shipping option. -// https://core.telegram.org/bots/api#shippingoption -type ShippingOption struct { - // Shipping option identifier - Id string `json:"id"` - // Option title - Title string `json:"title"` - // List of price portions - Prices []LabeledPrice `json:"prices,omitempty"` -} - -// ShippingQuery This object contains information about an incoming shipping query. 
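The two keyboard types above are plain structs, so a compact, single-use, user-targeted keyboard is just literal construction. A sketch (button labels and placeholder text are invented examples):

// buildLanguageKeyboard returns a keyboard that resizes to fit, hides after
// one use, and is only shown to the user being replied to.
func buildLanguageKeyboard() ReplyKeyboardMarkup {
	return ReplyKeyboardMarkup{
		Keyboard: [][]KeyboardButton{
			{{Text: "English"}, {Text: "Deutsch"}},
		},
		ResizeKeyboard:        true,
		OneTimeKeyboard:       true,
		InputFieldPlaceholder: "Pick a language",
		Selective:             true,
	}
}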
-// https://core.telegram.org/bots/api#shippingquery -type ShippingQuery struct { - // Unique query identifier - Id string `json:"id"` - // User who sent the query - From User `json:"from"` - // Bot specified invoice payload - InvoicePayload string `json:"invoice_payload"` - // User specified shipping address - ShippingAddress ShippingAddress `json:"shipping_address"` -} - -// Sticker This object represents a sticker. -// https://core.telegram.org/bots/api#sticker -type Sticker struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Type of the sticker, currently one of "regular", "mask", "custom_emoji". The type of the sticker is independent from its format, which is determined by the fields is_animated and is_video. - Type string `json:"type"` - // Sticker width - Width int64 `json:"width"` - // Sticker height - Height int64 `json:"height"` - // True, if the sticker is animated - IsAnimated bool `json:"is_animated"` - // True, if the sticker is a video sticker - IsVideo bool `json:"is_video"` - // Optional. Sticker thumbnail in the .WEBP or .JPG format - Thumb *PhotoSize `json:"thumb,omitempty"` - // Optional. Emoji associated with the sticker - Emoji string `json:"emoji,omitempty"` - // Optional. Name of the sticker set to which the sticker belongs - SetName string `json:"set_name,omitempty"` - // Optional. For premium regular stickers, premium animation for the sticker - PremiumAnimation *File `json:"premium_animation,omitempty"` - // Optional. For mask stickers, the position where the mask should be placed - MaskPosition *MaskPosition `json:"mask_position,omitempty"` - // Optional. For custom emoji stickers, unique identifier of the custom emoji - CustomEmojiId string `json:"custom_emoji_id,omitempty"` - // Optional. File size in bytes - FileSize int64 `json:"file_size,omitempty"` -} - -// StickerSet This object represents a sticker set. -// https://core.telegram.org/bots/api#stickerset -type StickerSet struct { - // Sticker set name - Name string `json:"name"` - // Sticker set title - Title string `json:"title"` - // Type of stickers in the set, currently one of "regular", "mask", "custom_emoji" - StickerType string `json:"sticker_type"` - // True, if the sticker set contains animated stickers - IsAnimated bool `json:"is_animated"` - // True, if the sticker set contains video stickers - IsVideo bool `json:"is_video"` - // List of all set stickers - Stickers []Sticker `json:"stickers,omitempty"` - // Optional. Sticker set thumbnail in the .WEBP, .TGS, or .WEBM format - Thumb *PhotoSize `json:"thumb,omitempty"` -} - -// SuccessfulPayment This object contains basic information about a successful payment. -// https://core.telegram.org/bots/api#successfulpayment -type SuccessfulPayment struct { - // Three-letter ISO 4217 currency code - Currency string `json:"currency"` - // Total price in the smallest units of the currency (integer, not float/double). For example, for a price of US$ 1.45 pass amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). - TotalAmount int64 `json:"total_amount"` - // Bot specified invoice payload - InvoicePayload string `json:"invoice_payload"` - // Optional. 
Identifier of the shipping option chosen by the user - ShippingOptionId string `json:"shipping_option_id,omitempty"` - // Optional. Order information provided by the user - OrderInfo *OrderInfo `json:"order_info,omitempty"` - // Telegram payment identifier - TelegramPaymentChargeId string `json:"telegram_payment_charge_id"` - // Provider payment identifier - ProviderPaymentChargeId string `json:"provider_payment_charge_id"` -} - -// Update This object represents an incoming update. -// At most one of the optional parameters can be present in any given update. -// https://core.telegram.org/bots/api#update -type Update struct { - // The update's unique identifier. Update identifiers start from a certain positive number and increase sequentially. This ID becomes especially handy if you're using webhooks, since it allows you to ignore repeated updates or to restore the correct update sequence, should they get out of order. If there are no new updates for at least a week, then identifier of the next update will be chosen randomly instead of sequentially. - UpdateId int64 `json:"update_id"` - // Optional. New incoming message of any kind - text, photo, sticker, etc. - Message *Message `json:"message,omitempty"` - // Optional. New version of a message that is known to the bot and was edited - EditedMessage *Message `json:"edited_message,omitempty"` - // Optional. New incoming channel post of any kind - text, photo, sticker, etc. - ChannelPost *Message `json:"channel_post,omitempty"` - // Optional. New version of a channel post that is known to the bot and was edited - EditedChannelPost *Message `json:"edited_channel_post,omitempty"` - // Optional. New incoming inline query - InlineQuery *InlineQuery `json:"inline_query,omitempty"` - // Optional. The result of an inline query that was chosen by a user and sent to their chat partner. Please see our documentation on the feedback collecting for details on how to enable these updates for your bot. - ChosenInlineResult *ChosenInlineResult `json:"chosen_inline_result,omitempty"` - // Optional. New incoming callback query - CallbackQuery *CallbackQuery `json:"callback_query,omitempty"` - // Optional. New incoming shipping query. Only for invoices with flexible price - ShippingQuery *ShippingQuery `json:"shipping_query,omitempty"` - // Optional. New incoming pre-checkout query. Contains full information about checkout - PreCheckoutQuery *PreCheckoutQuery `json:"pre_checkout_query,omitempty"` - // Optional. New poll state. Bots receive only updates about stopped polls and polls, which are sent by the bot - Poll *Poll `json:"poll,omitempty"` - // Optional. A user changed their answer in a non-anonymous poll. Bots receive new votes only in polls that were sent by the bot itself. - PollAnswer *PollAnswer `json:"poll_answer,omitempty"` - // Optional. The bot's chat member status was updated in a chat. For private chats, this update is received only when the bot is blocked or unblocked by the user. - MyChatMember *ChatMemberUpdated `json:"my_chat_member,omitempty"` - // Optional. A chat member's status was updated in a chat. The bot must be an administrator in the chat and must explicitly specify "chat_member" in the list of allowed_updates to receive these updates. - ChatMember *ChatMemberUpdated `json:"chat_member,omitempty"` - // Optional. A request to join the chat has been sent. The bot must have the can_invite_users administrator right in the chat to receive these updates. 
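Since at most one optional field of an Update is ever set, handler dispatch reduces to nil-checks. A minimal illustrative sketch (assumes "fmt" is imported; gotgbot's ext package provides a full dispatcher for real use):

// dispatch inspects which optional Update field is populated and routes it.
func dispatch(u Update) {
	switch {
	case u.Message != nil:
		fmt.Println("message in chat", u.Message.Chat.Id)
	case u.CallbackQuery != nil:
		fmt.Println("callback from user", u.CallbackQuery.From.Id)
	case u.ChatJoinRequest != nil:
		fmt.Println("join request for chat", u.ChatJoinRequest.Chat.Id)
	default:
		fmt.Println("unhandled update", u.UpdateId)
	}
}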
- ChatJoinRequest *ChatJoinRequest `json:"chat_join_request,omitempty"` -} - -// User This object represents a Telegram user or bot. -// https://core.telegram.org/bots/api#user -type User struct { - // Unique identifier for this user or bot. This number may have more than 32 significant bits and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a 64-bit integer or double-precision float type are safe for storing this identifier. - Id int64 `json:"id"` - // True, if this user is a bot - IsBot bool `json:"is_bot"` - // User's or bot's first name - FirstName string `json:"first_name"` - // Optional. User's or bot's last name - LastName string `json:"last_name,omitempty"` - // Optional. User's or bot's username - Username string `json:"username,omitempty"` - // Optional. IETF language tag of the user's language - LanguageCode string `json:"language_code,omitempty"` - // Optional. True, if this user is a Telegram Premium user - IsPremium bool `json:"is_premium,omitempty"` - // Optional. True, if this user added the bot to the attachment menu - AddedToAttachmentMenu bool `json:"added_to_attachment_menu,omitempty"` - // Optional. True, if the bot can be invited to groups. Returned only in getMe. - CanJoinGroups bool `json:"can_join_groups,omitempty"` - // Optional. True, if privacy mode is disabled for the bot. Returned only in getMe. - CanReadAllGroupMessages bool `json:"can_read_all_group_messages,omitempty"` - // Optional. True, if the bot supports inline queries. Returned only in getMe. - SupportsInlineQueries bool `json:"supports_inline_queries,omitempty"` -} - -// UserProfilePhotos This object represent a user's profile pictures. -// https://core.telegram.org/bots/api#userprofilephotos -type UserProfilePhotos struct { - // Total number of profile pictures the target user has - TotalCount int64 `json:"total_count"` - // Requested profile pictures (in up to 4 sizes each) - Photos [][]PhotoSize `json:"photos,omitempty"` -} - -// Venue This object represents a venue. -// https://core.telegram.org/bots/api#venue -type Venue struct { - // Venue location. Can't be a live location - Location Location `json:"location"` - // Name of the venue - Title string `json:"title"` - // Address of the venue - Address string `json:"address"` - // Optional. Foursquare identifier of the venue - FoursquareId string `json:"foursquare_id,omitempty"` - // Optional. Foursquare type of the venue. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) - FoursquareType string `json:"foursquare_type,omitempty"` - // Optional. Google Places identifier of the venue - GooglePlaceId string `json:"google_place_id,omitempty"` - // Optional. Google Places type of the venue. (See supported types.) - GooglePlaceType string `json:"google_place_type,omitempty"` -} - -// Video This object represents a video file. -// https://core.telegram.org/bots/api#video -type Video struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Video width as defined by sender - Width int64 `json:"width"` - // Video height as defined by sender - Height int64 `json:"height"` - // Duration of the video in seconds as defined by sender - Duration int64 `json:"duration"` - // Optional. 
Video thumbnail - Thumb *PhotoSize `json:"thumb,omitempty"` - // Optional. Original filename as defined by sender - FileName string `json:"file_name,omitempty"` - // Optional. MIME type of the file as defined by sender - MimeType string `json:"mime_type,omitempty"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. - FileSize int64 `json:"file_size,omitempty"` -} - -// VideoChatEnded This object represents a service message about a video chat ended in the chat. -// https://core.telegram.org/bots/api#videochatended -type VideoChatEnded struct { - // Video chat duration in seconds - Duration int64 `json:"duration"` -} - -// VideoChatParticipantsInvited This object represents a service message about new members invited to a video chat. -// https://core.telegram.org/bots/api#videochatparticipantsinvited -type VideoChatParticipantsInvited struct { - // New members that were invited to the video chat - Users []User `json:"users,omitempty"` -} - -// VideoChatScheduled This object represents a service message about a video chat scheduled in the chat. -// https://core.telegram.org/bots/api#videochatscheduled -type VideoChatScheduled struct { - // Point in time (Unix timestamp) when the video chat is supposed to be started by a chat administrator - StartDate int64 `json:"start_date"` -} - -// VideoChatStarted This object represents a service message about a video chat started in the chat. Currently holds no information. -// https://core.telegram.org/bots/api#videochatstarted -type VideoChatStarted struct{} - -// VideoNote This object represents a video message (available in Telegram apps as of v.4.0). -// https://core.telegram.org/bots/api#videonote -type VideoNote struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Video width and height (diameter of the video message) as defined by sender - Length int64 `json:"length"` - // Duration of the video in seconds as defined by sender - Duration int64 `json:"duration"` - // Optional. Video thumbnail - Thumb *PhotoSize `json:"thumb,omitempty"` - // Optional. File size in bytes - FileSize int64 `json:"file_size,omitempty"` -} - -// Voice This object represents a voice note. -// https://core.telegram.org/bots/api#voice -type Voice struct { - // Identifier for this file, which can be used to download or reuse the file - FileId string `json:"file_id"` - // Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. - FileUniqueId string `json:"file_unique_id"` - // Duration of the audio in seconds as defined by sender - Duration int64 `json:"duration"` - // Optional. MIME type of the file as defined by sender - MimeType string `json:"mime_type,omitempty"` - // Optional. File size in bytes. It can be bigger than 2^31 and some programming languages may have difficulty/silent defects in interpreting it. But it has at most 52 significant bits, so a signed 64-bit integer or double-precision float type are safe for storing this value. 
- FileSize int64 `json:"file_size,omitempty"` -} - -// WebAppData Describes data sent from a Web App to the bot. -// https://core.telegram.org/bots/api#webappdata -type WebAppData struct { - // The data. Be aware that a bad client can send arbitrary data in this field. - Data string `json:"data"` - // Text of the web_app keyboard button from which the Web App was opened. Be aware that a bad client can send arbitrary data in this field. - ButtonText string `json:"button_text"` -} - -// WebAppInfo Describes a Web App. -// https://core.telegram.org/bots/api#webappinfo -type WebAppInfo struct { - // An HTTPS URL of a Web App to be opened with additional data as specified in Initializing Web Apps - Url string `json:"url"` -} - -// WebhookInfo Describes the current status of a webhook. -// https://core.telegram.org/bots/api#webhookinfo -type WebhookInfo struct { - // Webhook URL, may be empty if webhook is not set up - Url string `json:"url"` - // True, if a custom certificate was provided for webhook certificate checks - HasCustomCertificate bool `json:"has_custom_certificate"` - // Number of updates awaiting delivery - PendingUpdateCount int64 `json:"pending_update_count"` - // Optional. Currently used webhook IP address - IpAddress string `json:"ip_address,omitempty"` - // Optional. Unix time for the most recent error that happened when trying to deliver an update via webhook - LastErrorDate int64 `json:"last_error_date,omitempty"` - // Optional. Error message in human-readable format for the most recent error that happened when trying to deliver an update via webhook - LastErrorMessage string `json:"last_error_message,omitempty"` - // Optional. Unix time of the most recent error that happened when trying to synchronize available updates with Telegram datacenters - LastSynchronizationErrorDate int64 `json:"last_synchronization_error_date,omitempty"` - // Optional. The maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery - MaxConnections int64 `json:"max_connections,omitempty"` - // Optional. A list of update types the bot is subscribed to. Defaults to all update types except chat_member - AllowedUpdates []string `json:"allowed_updates,omitempty"` -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/request.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/request.go deleted file mode 100644 index 0e4f70f..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/request.go +++ /dev/null @@ -1,258 +0,0 @@ -package gotgbot - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "mime/multipart" - "net/http" - "strings" - "time" ) - -const ( - // DefaultAPIURL is the default telegram API URL. - DefaultAPIURL = "https://api.telegram.org" - // DefaultTimeout is the default timeout to be set for all requests. - DefaultTimeout = time.Second * 5 -) - -type BotClient interface { - // RequestWithContext submits a POST HTTP request to a bot API instance. - RequestWithContext(ctx context.Context, method string, params map[string]string, data map[string]NamedReader, opts *RequestOpts) (json.RawMessage, error) - // TimeoutContext calculates the timeout context required given the passed RequestOpts, and any default opts defined by the BotClient. - TimeoutContext(opts *RequestOpts) (context.Context, context.CancelFunc) - // GetAPIURL gets the URL of the API the bot is interacting with. - GetAPIURL() string - // GetToken gets the current bot's token.
- GetToken() string -} - -type BaseBotClient struct { - // Token stores the bot's secret token obtained from t.me/BotFather, and is used to interact with telegram's API. - Token string - // Client is the HTTP Client used for all HTTP requests made for this bot. - Client http.Client - // UseTestEnvironment defines whether this bot was created to run on telegram's test environment. - // Enabling this uses a slightly different API path. - // See https://core.telegram.org/bots/webapps#using-bots-in-the-test-environment for more details. - UseTestEnvironment bool - // The default request opts for this bot instance. - DefaultRequestOpts *RequestOpts -} - -type Response struct { - // Ok: if true, request was successful, and result can be found in the Result field. - // If false, the error is explained in the Description field. - Ok bool `json:"ok"` - // Result: result of requests (if Ok) - Result json.RawMessage `json:"result"` - // ErrorCode: Integer error code of request. Subject to change in the future. - ErrorCode int `json:"error_code"` - // Description: contains a human readable description of the error result. - Description string `json:"description"` - // Parameters: Optional extra data which can help automatically handle the error. - Parameters *ResponseParameters `json:"parameters"` -} - -type TelegramError struct { - Method string - Params map[string]string - Code int - Description string -} - -func (t *TelegramError) Error() string { - return fmt.Sprintf("unable to %s: %s", t.Method, t.Description) -} - -type NamedReader interface { - Name() string - io.Reader -} - -type NamedFile struct { - File io.Reader - FileName string -} - -func (nf NamedFile) Read(p []byte) (n int, err error) { - return nf.File.Read(p) -} - -func (nf NamedFile) Name() string { - return nf.FileName -} - -// RequestOpts defines any request-specific options used to interact with the telegram API. -type RequestOpts struct { - // Timeout for the HTTP request to the telegram API. - Timeout time.Duration - // Custom API URL to use for requests. - APIURL string -} - -// TimeoutContext returns the appropriate context for the current settings. -func (bot *BaseBotClient) TimeoutContext(opts *RequestOpts) (context.Context, context.CancelFunc) { - if opts != nil { - ctx, cancelFunc := timeoutFromOpts(opts) - if ctx != nil { - return ctx, cancelFunc - } - } - - if bot.DefaultRequestOpts != nil { - ctx, cancelFunc := timeoutFromOpts(bot.DefaultRequestOpts) - if ctx != nil { - return ctx, cancelFunc - } - } - - return context.WithTimeout(context.Background(), DefaultTimeout) -} - -func timeoutFromOpts(opts *RequestOpts) (context.Context, context.CancelFunc) { - // nothing? no timeout. - if opts == nil { - return nil, nil - } - - if opts.Timeout > 0 { - // > 0 timeout defined. - return context.WithTimeout(context.Background(), opts.Timeout) - - } else if opts.Timeout < 0 { - // < 0 no timeout; infinite. - return context.Background(), func() {} - } - // 0 == nothing defined, use defaults. - return nil, nil -} - -// RequestWithContext allows sending a POST request to the telegram bot API with an existing context. -// - ctx: the timeout context to be used. -// - method: the telegram API method to call. -// - params: map of parameters to be sent to the telegram API. eg: chat_id, user_id, etc. -// - data: map of any files to be sent to the telegram API. -// - opts: request opts to use. Note: Timeout opts are ignored when used in RequestWithContext. Timeout handling is the -// responsibility of the caller/context owner.
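The timeout resolution order documented above is: per-call RequestOpts first, then the client's DefaultRequestOpts, then the package-level 5-second DefaultTimeout; a negative Timeout disables the deadline entirely. A caller-side sketch (assumes "time" and "encoding/json" imports plus a gotgbot import; the method name is just an example):

// getMe issues a raw call with an explicit 30s deadline overriding defaults.
func getMe(bot *gotgbot.BaseBotClient) (json.RawMessage, error) {
	opts := &gotgbot.RequestOpts{Timeout: 30 * time.Second}
	ctx, cancel := bot.TimeoutContext(opts)
	defer cancel()
	return bot.RequestWithContext(ctx, "getMe", nil, nil, opts)
}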
-func (bot *BaseBotClient) RequestWithContext(ctx context.Context, method string, params map[string]string, data map[string]NamedReader, opts *RequestOpts) (json.RawMessage, error) { - b := &bytes.Buffer{} - - var contentType string - // Check if there are any files to upload. If yes, use multipart; else, use JSON. - if len(data) > 0 { - var err error - contentType, err = fillBuffer(b, params, data) - if err != nil { - return nil, fmt.Errorf("failed to fill buffer with parameters and file data: %w", err) - } - } else { - contentType = "application/json" - err := json.NewEncoder(b).Encode(params) - if err != nil { - return nil, fmt.Errorf("failed to encode parameters as JSON: %w", err) - } - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, bot.methodEnpoint(method, opts), b) - if err != nil { - return nil, fmt.Errorf("failed to build POST request to %s: %w", method, err) - } - - req.Header.Set("Content-Type", contentType) - - resp, err := bot.Client.Do(req) - if err != nil { - return nil, fmt.Errorf("failed to execute POST request to %s: %w", method, err) - } - defer resp.Body.Close() - - var r Response - if err = json.NewDecoder(resp.Body).Decode(&r); err != nil { - return nil, fmt.Errorf("failed to decode POST request to %s: %w", method, err) - } - - if !r.Ok { - return nil, &TelegramError{ - Method: method, - Params: params, - Code: r.ErrorCode, - Description: r.Description, - } - } - - return r.Result, nil -} - -func fillBuffer(b *bytes.Buffer, params map[string]string, data map[string]NamedReader) (string, error) { - w := multipart.NewWriter(b) - - for k, v := range params { - err := w.WriteField(k, v) - if err != nil { - return "", fmt.Errorf("failed to write multipart field %s with value %s: %w", k, v, err) - } - } - - for field, file := range data { - fileName := file.Name() - if fileName == "" { - fileName = field - } - - part, err := w.CreateFormFile(field, fileName) - if err != nil { - return "", fmt.Errorf("failed to create form file for field %s and fileName %s: %w", field, fileName, err) - } - - _, err = io.Copy(part, file) - if err != nil { - return "", fmt.Errorf("failed to copy file contents of field %s to form: %w", field, err) - } - } - - if err := w.Close(); err != nil { - return "", fmt.Errorf("failed to close multipart form writer: %w", err) - } - - return w.FormDataContentType(), nil -} - -func getCleanAPIURL(url string) string { - if url == "" { - return DefaultAPIURL - } - // Trim suffix to ensure consistent output - return strings.TrimSuffix(url, "/") -} - -// GetAPIURL returns the currently used API endpoint. -func (bot *BaseBotClient) GetAPIURL() string { - return bot.getAPIURL(nil) -} - -// GetToken returns the currently used token. -func (bot *BaseBotClient) GetToken() string { - return bot.Token -} - -// getAPIURL returns the currently used API endpoint. 
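A non-empty data map is what flips a request from JSON into multipart mode, with fillBuffer writing each NamedReader under its field name. A hedged caller-side sketch of uploading a document (assumes "os", "path/filepath", and "encoding/json" imports; the chat id handling is illustrative, and gotgbot's generated wrappers normally do this for you):

// sendDocument uploads a local file as a multipart form field.
func sendDocument(bot *gotgbot.BaseBotClient, chatId, path string) (json.RawMessage, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	params := map[string]string{"chat_id": chatId}
	data := map[string]gotgbot.NamedReader{
		// Written under the "document" form field; FileName becomes the
		// multipart filename.
		"document": gotgbot.NamedFile{File: f, FileName: filepath.Base(path)},
	}

	ctx, cancel := bot.TimeoutContext(nil)
	defer cancel()
	return bot.RequestWithContext(ctx, "sendDocument", params, data, nil)
}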
-func (bot *BaseBotClient) getAPIURL(opts *RequestOpts) string { - if opts != nil && opts.APIURL != "" { - return getCleanAPIURL(opts.APIURL) - } - if bot.DefaultRequestOpts != nil && bot.DefaultRequestOpts.APIURL != "" { - return getCleanAPIURL(bot.DefaultRequestOpts.APIURL) - } - return DefaultAPIURL -} - -func (bot *BaseBotClient) methodEnpoint(method string, opts *RequestOpts) string { - if bot.UseTestEnvironment { - return fmt.Sprintf("%s/bot%s/test/%s", bot.getAPIURL(opts), bot.Token, method) - } - return fmt.Sprintf("%s/bot%s/%s", bot.getAPIURL(opts), bot.Token, method) -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/sender.go b/vendor/github.com/PaulSonOfLars/gotgbot/v2/sender.go deleted file mode 100644 index f91e5d2..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/sender.go +++ /dev/null @@ -1,132 +0,0 @@ -package gotgbot - -// Sender is a merge of the User and SenderChat fields of a message, to provide easier interaction with -// message senders from the telegram API. -type Sender struct { - // The User defined as the sender (if applicable) - User *User - // The Chat defined as the sender (if applicable) - Chat *Chat - // Whether the sender was an automatic forward; eg, a linked channel. - IsAutomaticForward bool - // The location that was sent to. Required to determine if the sender is a linked channel, an anonymous channel, - // or an anonymous admin. - ChatId int64 - // The custom admin title of the anonymous group administrator sender. - // Only available if IsAnonymousAdmin is true. - AuthorSignature string -} - -// GetSender populates the relevant fields of a Sender struct given a message. -func (m Message) GetSender() *Sender { - return &Sender{ - User: m.From, - Chat: m.SenderChat, - IsAutomaticForward: m.IsAutomaticForward, - ChatId: m.Chat.Id, - AuthorSignature: m.AuthorSignature, - } -} - -// Id determines the sender ID. -// When a message is being sent by a chat/channel, telegram usually populates the User field with dummy values. -// For this reason, we prefer to return the Chat.Id if it is available, rather than a dummy User.Id. -func (s Sender) Id() int64 { - if s.Chat != nil { - return s.Chat.Id - } - if s.User != nil { - return s.User.Id - } - return 0 -} - -// Username determines the sender username. -func (s Sender) Username() string { - if s.Chat != nil { - return s.Chat.Username - } - if s.User != nil { - return s.User.Username - } - return "" -} - -// Name determines the name of the sender. -// This is: -// - Chat.Title for a Chat. -// - User.FirstName + User.LastName for a User (the full name). -func (s Sender) Name() string { - if s.Chat != nil { - return s.Chat.Title - } - if s.User != nil { - if s.User.LastName == "" { - return s.User.FirstName - } - return s.User.FirstName + " " + s.User.LastName - } - return "" -} - -// FirstName determines the firstname of the sender. -// This is: -// - Chat.Title for a Chat. -// - User.FirstName for a User. -func (s Sender) FirstName() string { - if s.Chat != nil { - return s.Chat.Title - } - if s.User != nil { - return s.User.FirstName - } - return "" -} - -// LastName determines the lastname of the sender. -// This is: -// - empty for a Chat. -// - User.LastName for a User. -func (s Sender) LastName() string { - if s.Chat != nil { - return "" // empty; we define the "title" as being a firstname, so there is no lastname. - } - if s.User != nil { - return s.User.LastName - } - return "" -} - -// IsUser returns true if the Sender is a User (including bot).
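These helpers prefer Chat over User precisely because Telegram fills From with a placeholder account when a channel or anonymous admin is the real author. A short sketch tying together the classification helpers defined here and just below (assumes "fmt" is imported):

// describeSender classifies a message's author using the Sender helpers.
func describeSender(msg *Message) string {
	s := msg.GetSender()
	switch {
	case s.IsLinkedChannel():
		return fmt.Sprintf("linked channel %s (%d)", s.Name(), s.Id())
	case s.IsAnonymousAdmin():
		return fmt.Sprintf("anonymous admin %q in chat %d", s.AuthorSignature, s.ChatId)
	case s.IsUser():
		return fmt.Sprintf("user %s (@%s)", s.Name(), s.Username())
	default:
		return fmt.Sprintf("chat %s (%d)", s.Name(), s.Id())
	}
}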
-func (s Sender) IsUser() bool { - return s.Chat == nil && s.User != nil -} - -// IsBot returns true if the Sender is a bot. -// Returns false if the user is a bot setup by telegram for backwards compatibility with -// the sender_chat fields. -func (s Sender) IsBot() bool { - return s.Chat == nil && s.User != nil && s.User.IsBot -} - -// IsAnonymousAdmin returns true if the Sender is an anonymous admin sending to a group. -// For channel posts in a channel, see IsChannelPost. -func (s Sender) IsAnonymousAdmin() bool { - return s.Chat != nil && s.Chat.Id == s.ChatId && s.Chat.Type != "channel" -} - -// IsChannelPost returns true if the Sender is a channel admin posting to that same channel. -func (s Sender) IsChannelPost() bool { - return s.Chat != nil && s.Chat.Id == s.ChatId && s.Chat.Type == "channel" -} - -// IsAnonymousChannel returns true if the Sender is an anonymous channel sending to a group. -// For channel admins posting in their own channel, see IsChannelPost. -func (s Sender) IsAnonymousChannel() bool { - return s.Chat != nil && s.Chat.Id != s.ChatId && !s.IsAutomaticForward && s.Chat.Type == "channel" -} - -// IsLinkedChannel returns true if the Sender is a linked channel sending to the group it is linked to. -func (s Sender) IsLinkedChannel() bool { - return s.Chat != nil && s.Chat.Id != s.ChatId && s.IsAutomaticForward -} diff --git a/vendor/github.com/PaulSonOfLars/gotgbot/v2/spec_commit b/vendor/github.com/PaulSonOfLars/gotgbot/v2/spec_commit deleted file mode 100644 index 5583886..0000000 --- a/vendor/github.com/PaulSonOfLars/gotgbot/v2/spec_commit +++ /dev/null @@ -1 +0,0 @@ -53170b1db6273b262dda13ab0171eb65a7d51f77 \ No newline at end of file diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d..0000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 52ccb5a..0000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,18 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Amazon.com, Inc -Damian Gryski -Eric Buth -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Klaus Post -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index ea6524d..0000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,41 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. 
-# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Alex Legg -Damian Gryski -Eric Buth -Jan Mercl <0xjnml@gmail.com> -Jonathan Swinney -Kai Backman -Klaus Post -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea1287..0000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). 
On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 23c6e26..0000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. 
-func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f6..0000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. 
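The decode.go hunk above covers both Snappy formats: the block format (a uvarint length header followed by literal and copy tags, parsed by DecodedLen and Decode) and the framed stream format handled by Reader. A hypothetical round trip through the public block-format API, not part of the vendored code:

```go
// Hypothetical block-format round trip; not part of the vendored code.
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog")

	// Encode prepends the uncompressed length as a uvarint, then emits
	// literal/copy tags; passing nil lets it allocate the destination.
	block := snappy.Encode(nil, src)

	// DecodedLen parses only that varint header.
	n, err := snappy.DecodedLen(block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d byte block; header says %d\n", len(src), len(block), n)

	dst, err := snappy.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dst) == string(src)) // true
}
```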
The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. 
-	//
-	// We assume that:
-	//	- CX == length && CX > 0
-	//	- DX == offset
-
-	// if offset <= 0 { etc }
-	CMPQ DX, $0
-	JLE errCorrupt
-
-	// if d < offset { etc }
-	MOVQ DI, BX
-	SUBQ R8, BX
-	CMPQ BX, DX
-	JLT errCorrupt
-
-	// if length > len(dst)-d { etc }
-	MOVQ R10, BX
-	SUBQ DI, BX
-	CMPQ CX, BX
-	JGT errCorrupt
-
-	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
-	//
-	// Set:
-	//	- R14 = len(dst)-d
-	//	- R15 = &dst[d-offset]
-	MOVQ R10, R14
-	SUBQ DI, R14
-	MOVQ DI, R15
-	SUBQ DX, R15
-
-	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
-	//
-	// First, try using two 8-byte load/stores, similar to the doLit technique
-	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
-	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
-	// and not one 16-byte load/store, and the first store has to be before the
-	// second load, due to the overlap if offset is in the range [8, 16).
-	//
-	// if length > 16 || offset < 8 || len(dst)-d < 16 {
-	//	goto slowForwardCopy
-	// }
-	// copy 16 bytes
-	// d += length
-	CMPQ CX, $16
-	JGT slowForwardCopy
-	CMPQ DX, $8
-	JLT slowForwardCopy
-	CMPQ R14, $16
-	JLT slowForwardCopy
-	MOVQ 0(R15), AX
-	MOVQ AX, 0(DI)
-	MOVQ 8(R15), BX
-	MOVQ BX, 8(DI)
-	ADDQ CX, DI
-	JMP loop
-
-slowForwardCopy:
-	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
-	// can still try 8-byte load stores, provided we can overrun up to 10 extra
-	// bytes. As above, the overrun will be fixed up by subsequent iterations
-	// of the outermost loop.
-	//
-	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
-	// commentary says:
-	//
-	// ----
-	//
-	// The main part of this loop is a simple copy of eight bytes at a time
-	// until we've copied (at least) the requested amount of bytes. However,
-	// if d and d-offset are less than eight bytes apart (indicating a
-	// repeating pattern of length < 8), we first need to expand the pattern in
-	// order to get the correct results. For instance, if the buffer looks like
-	// this, with the eight-byte <d-offset> and <d> patterns marked as
-	// intervals:
-	//
-	//	abxxxxxxxxxxxx
-	//	[------]           d-offset
-	//	  [------]         d
-	//
-	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
-	// once, after which we can move <d> two bytes without moving <d-offset>:
-	//
-	//	ababxxxxxxxxxx
-	//	[------]           d-offset
-	//	    [------]       d
-	//
-	// and repeat the exercise until the two no longer overlap.
-	//
-	// This allows us to do very well in the special case of one single byte
-	// repeated many times, without taking a big hit for more general cases.
-	//
-	// The worst case of extra writing past the end of the match occurs when
-	// offset == 1 and length == 1; the last copy will read from byte positions
-	// [0..7] and write to [4..11], whereas it was only supposed to write to
-	// position 1. Thus, ten excess bytes.
-	//
-	// ----
-	//
-	// That "10 byte overrun" worst case is confirmed by Go's
-	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
-	// and finishSlowForwardCopy algorithm.
-	//
-	// if length > len(dst)-d-10 {
-	//	goto verySlowForwardCopy
-	// }
-	SUBQ $10, R14
-	CMPQ CX, R14
-	JGT verySlowForwardCopy
-
-makeOffsetAtLeast8:
-	// !!! As above, expand the pattern so that offset >= 8 and we can use
-	// 8-byte load/stores.
- // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s deleted file mode 100644 index 7a3ead1..0000000 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. 
- MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 - -loop: - // for s < len(src) - CMP R13, R6 - BEQ end - - // R4 = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 - MOVW $1, R1 - CMPW R1, R3 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R6, R6 - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R4 == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. - ADD $1, R4, R4 - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R4 - BGT callMemmove - CMP $16, R2 - BLT callMemmove - CMP $16, R3 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 - BGT errCorrupt - CMP R3, R4 - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. 
- MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // case x == 60: - MOVW $61, R1 - CMPW R1, R4 - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R6), R4 - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 - B doLit - -tagLit62Plus: - CMPW $62, R4 - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt - - // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 - BLT errCorrupt - - // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores
-	// and not one 16-byte load/store, and the first store has to be before the
-	// second load, due to the overlap if offset is in the range [8, 16).
-	//
-	// if length > 16 || offset < 8 || len(dst)-d < 16 {
-	//	goto slowForwardCopy
-	// }
-	// copy 16 bytes
-	// d += length
-	CMP $16, R4
-	BGT slowForwardCopy
-	CMP $8, R5
-	BLT slowForwardCopy
-	CMP $16, R14
-	BLT slowForwardCopy
-	MOVD 0(R15), R2
-	MOVD R2, 0(R7)
-	MOVD 8(R15), R3
-	MOVD R3, 8(R7)
-	ADD R4, R7, R7
-	B loop
-
-slowForwardCopy:
-	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
-	// can still try 8-byte load stores, provided we can overrun up to 10 extra
-	// bytes. As above, the overrun will be fixed up by subsequent iterations
-	// of the outermost loop.
-	//
-	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
-	// commentary says:
-	//
-	// ----
-	//
-	// The main part of this loop is a simple copy of eight bytes at a time
-	// until we've copied (at least) the requested amount of bytes. However,
-	// if d and d-offset are less than eight bytes apart (indicating a
-	// repeating pattern of length < 8), we first need to expand the pattern in
-	// order to get the correct results. For instance, if the buffer looks like
-	// this, with the eight-byte <d-offset> and <d> patterns marked as
-	// intervals:
-	//
-	//	abxxxxxxxxxxxx
-	//	[------]           d-offset
-	//	  [------]         d
-	//
-	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
-	// once, after which we can move <d> two bytes without moving <d-offset>:
-	//
-	//	ababxxxxxxxxxx
-	//	[------]           d-offset
-	//	    [------]       d
-	//
-	// and repeat the exercise until the two no longer overlap.
-	//
-	// This allows us to do very well in the special case of one single byte
-	// repeated many times, without taking a big hit for more general cases.
-	//
-	// The worst case of extra writing past the end of the match occurs when
-	// offset == 1 and length == 1; the last copy will read from byte positions
-	// [0..7] and write to [4..11], whereas it was only supposed to write to
-	// position 1. Thus, ten excess bytes.
-	//
-	// ----
-	//
-	// That "10 byte overrun" worst case is confirmed by Go's
-	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
-	// and finishSlowForwardCopy algorithm.
-	//
-	// if length > len(dst)-d-10 {
-	//	goto verySlowForwardCopy
-	// }
-	SUB $10, R14, R14
-	CMP R14, R4
-	BGT verySlowForwardCopy
-
-makeOffsetAtLeast8:
-	// !!! As above, expand the pattern so that offset >= 8 and we can use
-	// 8-byte load/stores.
-	//
-	// for offset < 8 {
-	//	copy 8 bytes from dst[d-offset:] to dst[d:]
-	//	length -= offset
-	//	d += offset
-	//	offset += offset
-	//	// The two previous lines together means that d-offset, and therefore
-	//	// R15, is unchanged.
-	// }
-	CMP $8, R5
-	BGE fixUpSlowForwardCopy
-	MOVD (R15), R3
-	MOVD R3, (R7)
-	SUB R5, R4, R4
-	ADD R5, R7, R7
-	ADD R5, R5, R5
-	B makeOffsetAtLeast8
-
-fixUpSlowForwardCopy:
-	// !!! Add length (which might be negative now) to d (implied by R7 being
-	// &dst[d]) so that d ends up at the right place when we jump back to the
-	// top of the loop. Before we do that, though, we save R7 to R2 so that, if
-	// length is positive, copying the remaining length bytes will write to the
-	// right place.
-	MOVD R7, R2
-	ADD R4, R7, R7
-
-finishSlowForwardCopy:
-	// !!! Repeat 8-byte load/stores until length <= 0.
Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R4 - BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R10, R7 - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go deleted file mode 100644 index 7082b34..0000000 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 2f672be..0000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 7f23657..0000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. 
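The overlapping-copy loop in decode_other.go above must run strictly forwards: when offset < length, each iteration re-reads a byte written earlier in the same copy, which is how Snappy expresses a short repeating pattern. A hypothetical standalone demonstration, not part of the vendored code:

```go
// Hypothetical demo of the forward overlap copy; not part of the vendored code.
package main

import "fmt"

func main() {
	dst := make([]byte, 8)
	dst[0] = 'a' // one literal byte already decoded

	// Emulate a copy tag with offset=1, length=7 starting at d=1.
	d, offset, length := 1, 1, 7
	a := dst[d : d+length]
	b := dst[d-offset:]
	b = b[:len(a)]
	for i := range a {
		a[i] = b[i] // forward order: reads the byte the previous step wrote
	}
	fmt.Println(string(dst)) // "aaaaaaaa"

	// The built-in copy has memmove semantics (as if via a temporary
	// buffer), so it would yield "aa" followed by zero bytes here instead
	// of propagating the pattern.
}
```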
On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. 
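Matching the deprecation note above, stream-format writes normally go through NewBufferedWriter (defined just below) with an explicit Close. A hypothetical usage sketch, not part of the vendored code:

```go
// Hypothetical stream-format usage; not part of the vendored code.
package main

import (
	"bytes"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("many small writes are fine here")); err != nil {
		log.Fatal(err)
	}
	// Close (or Flush) pushes any buffered chunk to the underlying writer.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	var out bytes.Buffer
	if _, err := out.ReadFrom(snappy.NewReader(&buf)); err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d bytes", out.Len())
}
```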
-func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. 
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979..0000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
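For reference against the assembly that follows, the literal tag stores len(lit)-1 either in the tag byte itself (values below 60) or in one or two trailing length bytes, signalled by the 0xf0 (60<<2) and 0xf4 (61<<2) tag values seen in the oneByte/twoBytes/threeBytes branches. A hypothetical pure-Go rendering, modeled on the package's encode_other.go fallback (not part of this hunk):

```go
// Hypothetical pure-Go rendering of emitLiteral; not part of the vendored code.
package example

const tagLiteral = 0x00

// emitLiteralGo assumes dst is large enough, as the assembly does.
func emitLiteralGo(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral // length fits in the tag byte
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral // 0xf0: one extra length byte
		dst[1] = uint8(n)
		i = 2
	default:
		dst[0] = 61<<2 | tagLiteral // 0xf4: two extra length bytes
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}
```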
-TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. 
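The extendMatch fast path above compares eight bytes at a time, then locates the first differing byte from the XOR of the two words. In portable Go the same trick is math/bits.TrailingZeros64 divided by eight; a hypothetical sketch, not part of the vendored code:

```go
// Hypothetical portable version of the asm's match-extension trick;
// not part of the vendored code.
package example

import (
	"encoding/binary"
	"math/bits"
)

// firstDiff returns the index of the first byte where a and b differ,
// or the length of the shorter slice if one is a prefix of the other.
func firstDiff(a, b []byte) int {
	i := 0
	for ; i+8 <= len(a) && i+8 <= len(b); i += 8 {
		x := binary.LittleEndian.Uint64(a[i:]) ^ binary.LittleEndian.Uint64(b[i:])
		if x != 0 {
			// Trailing zero bits / 8 = first differing byte (little-endian),
			// mirroring the BSF + SHRQ $3 sequence in the assembly.
			return i + bits.TrailingZeros64(x)/8
		}
	}
	for ; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return i
}
```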
The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
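The XOR-and-BSF idiom in the bsf block above translates directly to Go with math/bits; this is an illustrative sketch, not code from the package. On a little-endian machine, the number of trailing zero bits in x^y, divided by 8, is the index of the first differing byte.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// firstDiff returns the index of the first byte at which two 8-byte words
// differ (the words must not be equal). TrailingZeros64 is what the BSFQ
// instruction computes; the >>3 converts a bit index to a byte index.
func firstDiff(a, b []byte) int {
	x := binary.LittleEndian.Uint64(a)
	y := binary.LittleEndian.Uint64(b)
	return bits.TrailingZeros64(x^y) >> 3
}

func main() {
	fmt.Println(firstDiff([]byte("snappyGo"), []byte("snapPyGo"))) // prints 4
}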
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
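Expressed as Go rather than MOVOU instructions, the fast path above looks roughly like the sketch below (hypothetical name; the real implementation is the assembly). It always moves 16 bytes, which may write past the end of the literal; that is safe only because the inputMargin invariant guarantees the headroom, and later output overwrites the overrun.

// emitShortLiteral is a sketch of emitLiteralFastPath for 1 <= len(lit) <= 16.
// It assumes cap(lit) >= 16 and at least 17 bytes of room in dst, both of
// which the inputMargin invariant provides inside encodeBlock.
func emitShortLiteral(dst, lit []byte) int {
	dst[0] = uint8(len(lit)-1) << 2 // 1-byte literal tag: n<<2 | tagLiteral(0)
	copy(dst[1:17], lit[:16])       // always copy 16 bytes, like the MOVOU pair
	return 1 + len(lit)
}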
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index f8d54ad..0000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
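One detail worth spelling out from the inner loop above: a single 8-byte load at s-1 feeds all three hash updates, because uint32(x>>0), uint32(x>>8) and uint32(x>>16) are exactly the 4-byte windows starting at s-1, s and s+1. A sketch using the same multiplicative hash as encode_other.go:

// hash is encodeBlock's multiplicative hash: (u * 0x1e35a7bd) >> shift.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// threeHashes shows how one load64 at s-1 yields the hashes for the windows
// at s-1, s and s+1: shift right by 0, 8 and 16 bits, truncate to 32 bits.
func threeHashes(x uint64, shift uint32) (prev, curr, next uint32) {
	return hash(uint32(x), shift), hash(uint32(x>>8), shift), hash(uint32(x>>16), shift)
}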
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
- CMP R13, R7 - BHI cmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE bsf - ADD $8, R15, R15 - ADD $8, R7, R7 - B cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS extendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE extendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - R3 . . -// - R4 . . -// - R5 64 shift -// - R6 72 &src[0], tableSize -// - R7 80 &src[s] -// - R8 88 &dst[d] -// - R9 96 sLimit -// - R10 . &src[nextEmit] -// - R11 104 prevHash, currHash, nextHash, offset -// - R12 112 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 120 candidate -// - R16 . hash constant, 0x1e35a7bd -// - R17 . &table -// - . 128 table -// -// The second column (64, 72, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 - MOVD dst_base+0(FP), R8 - MOVD src_base+24(FP), R7 - MOVD src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVD $24, R5 - MOVD $256, R6 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - MOVD $16384, R2 - CMP R2, R6 - BGE varTable - CMP R14, R6 - BGE varTable - SUB $1, R5, R5 - LSL $1, R6, R6 - B calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each - // iterations writes 64 bytes, so we can do only tableSize/32 writes - // instead of the 2048 writes that would zero-initialize all of table's - // 32768 bytes. This clear could overrun the first tableSize elements, but - // it won't overrun the allocated stack size. - ADD $128, RSP, R17 - MOVD R17, R4 - - // !!! R6 = &src[tableSize] - ADD R6<<1, R17, R6 - -memclr: - STP.P (ZR, ZR), 64(R4) - STP (ZR, ZR), -48(R4) - STP (ZR, ZR), -32(R4) - STP (ZR, ZR), -16(R4) - CMP R4, R6 - BHI memclr - - // !!! R6 = &src[0] - MOVD R7, R6 - - // sLimit := len(src) - inputMargin - MOVD R14, R9 - SUB $15, R9, R9 - - // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e7..0000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index 296d7f0..0000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. 
- tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
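An aside on the 64±4 thresholds used by the emitCopy calls above, worked through for a concrete input (a sketch; dst is assumed large enough, and emitCopy is the pure-Go version from this file):

// emitCopy(dst, 100, 67) proceeds as:
//   67 < 68            -> skip the "length >= 68" loop
//   67 > 64            -> emit a length-60 tagCopy2 (3 bytes); length = 7
//   7 < 12, 100 < 2048 -> emit a length-7 tagCopy1 (2 bytes)
// for 5 bytes total. The naive 64+3 split would cost 6 bytes, because a
// length-3 remainder cannot be a tagCopy1 (its minimum length is 4) and so
// needs a 3-byte tagCopy2.
n := emitCopy(dst, 100, 67) // n == 5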
If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692e..0000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. 
-*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 4024335..0000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b378..0000000 --- a/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 0af08e6..0000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,141 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
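The tag-byte layout spelled out in snappy.go's package comment above is mechanical to decode; here is a hedged sketch with hypothetical helper names (the deleted decode.go handles these cases inline rather than through helpers like these):

// splitTag breaks an element's first byte into the 2-bit chunk tag l and
// the 6-bit value m described in the package comment.
func splitTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

// decodeCopy1 recovers offset and length from a 2-byte tagCopy1 element:
// length is 4 + the low 3 bits of m, the high 3 bits of m are bits 8-10 of
// the offset, and the second byte is bits 0-7 of the offset.
func decodeCopy1(b0, b1 byte) (offset, length int) {
	m := int(b0 >> 2)
	return m>>3<<8 | int(b1), 4 + m&7
}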
-# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - - go install mvdan.cc/garble@latest - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - gobinary: garble - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d5574..0000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index ad5c63a..0000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,560 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. 
- -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -
- See Details
-Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
-
-Stream decompression is now faster when running asynchronously, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
-
-While the release has been extensively tested, it is recommended to test when upgrading.
-
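As a minimal sketch of what the synchronous mode above looks like in use (assuming the `zstd` package's `WithEncoderConcurrency`/`WithDecoderConcurrency` options; the payload is arbitrary):

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1 selects the synchronous path: no goroutines are
	// spawned, so encoders and decoders can be pooled and collected freely.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("some payload")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		log.Fatal(err)
	}
}
```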
- -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) - -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x
-
-* Mar 26, 2021 (v1.11.13)
- * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
- * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
- * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
- * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
-
-* Mar 5, 2021 (v1.11.12)
- * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
- * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
-
-* Mar 1, 2021 (v1.11.9)
- * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
- * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
- * s2: Fix binaries.
-
-* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
- * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
- * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
- * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
- * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
-
-* Jan 14, 2021 (v1.11.7)
- * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
- * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
- * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
- * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
- * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
-
-* Jan 7, 2021 (v1.11.6)
- * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
- * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
-
-* Dec 20, 2020 (v1.11.4)
- * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
- * Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
- * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
- * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0
-
-* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
-* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
-* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
-* Jan 4, 2020: (v1.9.6) Fixed a regression in v1.9.5 that caused corrupt zstd encodes in rare cases.
-* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
-* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
-* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
-* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
-* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
-* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
-* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
-* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
-* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
-* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
-* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
-* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
-* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
-* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
-* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
-* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
-* Nov 11, 2019: Reduce inflate memory use by 1KB.
-* Nov 10, 2019: Less allocations in deflate bit writer.
-* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
-* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
-* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
-* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
-* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
-
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
-* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
-* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
-* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
-* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
-* May 28, 2017: Reduce allocations when resetting decoder.
-* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
-* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
-* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
-* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
-* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
-* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
-* Mar 24, 2016: Small speedup for level 1-3.
-* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
-* Feb 19, 2016: Handle small payloads faster in level 1-3.
-* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
-* Feb 14, 2016: Snappy: Merge upstream changes.
-* Feb 14, 2016: Snappy: Fix aggressive skipping.
-* Feb 14, 2016: Snappy: Update benchmark.
-* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
-* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
-* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
-* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
-* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
-* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
-* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
-* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
-* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
-* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
-* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
-* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
-* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
-
-# deflate usage
-
-The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them:
-
-| old import | new import | Documentation |
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
-
-* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
-
-You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
-
-The packages contain the same functionality as the standard library, so you can use their godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
-
-Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
-
-Memory usage is typically 1MB for a Writer. stdlib is in the same range.
-If you expect to have a lot of concurrently allocated Writers, consider using
-the stateless compression described below.
-
-For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
-
-# Stateless compression
-
-This package offers stateless compression as a special option for gzip/deflate.
-It will do compression but without maintaining any state between Write calls.
-
-This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
-
-This is only relevant in cases where you expect to run many thousands of compressors concurrently,
-but with very little activity. This is *not* intended for regular web servers serving individual requests.
-
-Because of this, the size of actual Write calls will affect output size.
-
-In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
-
-For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
-
-A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
-
-```
-	// replace 'ioutil.Discard' with your output.
-	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
-	if err != nil {
-		return err
-	}
-	defer gzw.Close()
-
-	w := bufio.NewWriterSize(gzw, 4096)
-	defer w.Flush()
-
-	// Write to 'w'
-```
-
-This will only use up to 4KB in memory when the writer is idle.
-
-Compression is almost always worse than the fastest compression level
-and each write will allocate (a little) memory.
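For reference, a self-contained version of the fragment above might look like this (a sketch only; the `bytes.Buffer` destination, the 4KB buffer size, and the sample payload stand in for your own output and data):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// StatelessCompression (level -3) keeps no state between Write calls,
	// so an idle writer holds almost no memory.
	gzw, err := gzip.NewWriterLevel(&buf, gzip.StatelessCompression)
	if err != nil {
		log.Fatal(err)
	}

	// Batch writes so each flushed chunk is a reasonable block size; with
	// stateless compression the size of each Write affects output size.
	w := bufio.NewWriterSize(gzw, 4096)
	for i := 0; i < 1000; i++ {
		fmt.Fprintln(w, "log line", i)
	}
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
	if err := gzw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed size:", buf.Len())
}
```

Each flush of the 4KB buffer becomes an independently compressed chunk, which is why the buffer size directly influences the output size.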
-
-# Performance Update 2018
-
-It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) each use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is "give me the best compression", with the values in between giving something reasonable. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS.
-
-Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible.
Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
-
-The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1).
So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692..0000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324d..0000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. 
-
-Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
-encoding provides fast, near-optimal symbol encoding/decoding
-for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
-but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
-
-## News
-
- * Feb 2018: First implementation released. Consider this beta software for now.
-
-# Usage
-
-This package provides a low level interface that allows compressing single independent blocks.
-
-Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes and also do checksums if needed.
-
-Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error | Description |
-|---------------------|-----------------------------------------------------------------------------|
-| `<nil>` | Everything ok, output is returned |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
-| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
-| `(error)` | An internal error occurred. |
-
-As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
-object can be used for both.
-
-Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
-you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
-
-Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
-You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
-
-# Performance
-
-A lot of factors affect speed. Block sizes and the compressibility of the material are primary factors.
-All compression functions currently run only on the calling goroutine, so only one core will be used per block.
-
-The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
-is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
-beneficial to transpose all your input values down by 64.
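To make the usage described above concrete, here is a minimal round-trip sketch (the skewed sample input is arbitrary; the error handling follows the table in the Usage section, and the `Scratch` reuse follows the `Out` caveat above):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Skewed symbol distribution: a good fit for entropy coding.
	in := bytes.Repeat([]byte("aaaaaabbbc"), 500)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case nil:
		// Compressed block returned; handled below.
	case fse.ErrIncompressible:
		log.Fatal("input judged too hard to compress; store it raw")
	case fse.ErrUseRLE:
		log.Fatal("input is a single repeated byte; use RLE instead")
	default:
		log.Fatal(err)
	}

	// The Out buffer is shared between directions, so detach the block
	// and clear Out before reusing the same Scratch for decompression.
	block := append([]byte(nil), comp...)
	s.Out = nil

	out, err := fse.Decompress(block, &s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
		len(in), len(block), bytes.Equal(in, out))
}
```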
- -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb39..0000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index 43e4636..0000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d..0000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 6f34191..0000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - default: - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - return s.bw.close() -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human-readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables are big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -}
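To ground the fixed-point scheme of `symbolTransform` above with numbers (chosen for illustration, matching the default case of `buildCTable` below): with `tableLog = 9` and a normalized count `v = 3`, `maxBitsOut = 9 - highBits(3-1) = 8` and `minStatePlus = 3 << 8 = 768`, so `deltaNbBits = (8 << 16) - 768`. A single add-and-shift then yields the per-step bit count:

```go
package main

import "fmt"

// nbBitsOut mirrors the first line of cState.encode above: deltaNbBits
// packs the maximum bit count into the high 16 bits, minus a state
// threshold, so one addition and shift selects 7 or 8 bits per step.
func nbBitsOut(state uint16, deltaNbBits uint32) uint32 {
	return (uint32(state) + deltaNbBits) >> 16
}

func main() {
	deltaNbBits := (uint32(8) << 16) - 768    // tableLog=9, v=3 (illustrative)
	fmt.Println(nbBitsOut(512, deltaNbBits))  // 7: low states emit fewer bits
	fmt.Println(nbBitsOut(1023, deltaNbBits)) // 8: high states emit the maximum
}
```

- -// buildCTable will populate the compression table so it is ready to be used.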
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go deleted file mode 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -const ( - tablelogAbsoluteMax = 15 -) - -// Decompress a block of data. -// You must provide the output from the compression stage, at exactly the size you got back. -// If you receive an error back, your input was likely corrupted. -// It is important to note that a successful decoding does *not* mean your output matches your original input. -// There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. -func Decompress(b []byte, s *Scratch) ([]byte, error) { - s, err := s.prepare(b) - if err != nil { - return nil, err - } - s.Out = s.Out[:0] - err = s.readNCount() - if err != nil { - return nil, err - } - err = s.buildDtable() - if err != nil { - return nil, err - } - err = s.decompress() - if err != nil { - return nil, err - } - - return s.Out, nil -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *Scratch) readNCount() error { - var ( - charnum uint16 - previous0 bool - ) - b := &s.br - iend := b.remain() - if iend < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<<s.actualTableLog { - return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog) - } - b.advance((bitCount + 7) >> 3) - return nil -} - -// decSymbol contains information about a state entry, -// including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -}
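The `decSymbol` layout above is all the decoder needs per step: emit the symbol, then refine `newState` with `nbBits` fresh bits. The toy sketch below (not package code; `readBits` stands in for the internal bit reader) shows that loop in isolation:

```go
package main

import "fmt"

// decEntry mirrors decSymbol above: the next base state, the symbol
// this state emits, and how many low bits refine the next state.
type decEntry struct {
	newState uint16
	symbol   uint8
	nbBits   uint8
}

// decodeSteps walks the table for n steps, emitting one symbol per
// state transition (a toy version of the decoder loop further below).
func decodeSteps(dt []decEntry, state uint16, readBits func(uint8) uint16, n int) []byte {
	out := make([]byte, 0, n)
	for i := 0; i < n; i++ {
		e := dt[state]
		out = append(out, e.symbol)
		state = e.newState + readBits(e.nbBits)
	}
	return out
}

func main() {
	// Degenerate 2-state table over 'a' and 'b', one bit per step.
	dt := []decEntry{{0, 'a', 1}, {0, 'b', 1}}
	bits := []uint16{1, 0, 1, 1}
	i := 0
	readBits := func(uint8) uint16 { v := bits[i]; i++; return v }
	fmt.Printf("%s\n", decodeSteps(dt, 0, readBits, 4)) // prints "abab"
}
```

- -// allocDtable will allocate decoding tables if they are not big enough.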
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - br.init(s.br.unread()) - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbad..0000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is the output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for the next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // have been decoded. - // If 0, the maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits have prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows populating the histogram and skipping that step in the compression. -// It otherwise allows inspecting the histogram when compression is done. -// To indicate that you have populated the histogram, call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always have length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index.
-func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh deleted file mode 100644 index aff9422..0000000 --- a/vendor/github.com/klauspost/compress/gen.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd s2/cmd/_s2sx/ || exit 1 -go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d2629..0000000 --- a/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c6..0000000 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is -a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs -(Arithmetic Logic Units) and achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do, -but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low-level interface that allows compressing single, independent blocks. - -Each block is separate, and there are no built-in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | |---------------------|-----------------------------------------------------------------------------| | `<nil>` | Everything ok, output is returned | | `ErrIncompressible` | Returned when input is judged to be too hard to compress | | `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | | `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 KiB) | | `(error)` | An internal error occurred. | - - -As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same -object can be used for both.
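As a concrete sketch of the flow described in this section, using the documented `Compress1X`, `ReadTable`, and `Decompress1X` entry points (the input bytes are illustrative; with a fresh scratch, no table re-use is attempted, so the output starts with the table):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Illustrative input: skewed, repetitive bytes entropy-code well.
	in := bytes.Repeat([]byte("abcdeabcdeaaaaa"), 400)

	var enc huff0.Scratch
	comp, reused, err := huff0.Compress1X(in, &enc)
	switch err {
	case nil:
	case huff0.ErrIncompressible, huff0.ErrUseRLE, huff0.ErrTooBig:
		log.Fatalf("not entropy coded: %v", err) // expected under normal operation
	default:
		log.Fatalf("internal error: %v", err)
	}
	fmt.Printf("compressed %d -> %d bytes (table re-used: %v)\n", len(in), len(comp), reused)

	// Decode: ReadTable consumes the table and returns the data part,
	// which is then handed to the decompressor.
	dec, data, err := huff0.ReadTable(comp, nil)
	if err != nil {
		log.Fatal(err)
	}
	out, err := dec.Decompress1X(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round trip ok: %v\n", bytes.Equal(out, in))
}
```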
- -Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do, however, note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -You can supply the complete block to `ReadTable`, and it will return the data part of the block, -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back, -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt, open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index 504a7be..0000000 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input.
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index ec71f7a..0000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
-// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d..0000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. 
-func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 4d14542..0000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,730 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// so when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similarly to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// so when sharing across independent encodes, be sure to set the re-use policy. -func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table.
- err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. - s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) -} - -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. 
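The loop below is easier to follow with the emission order written out: the bit stream is decoded back-to-front, so the encoder writes the 1-3 trailing bytes first and then walks the body backwards in quads. A toy trace (emitOrder is a hypothetical helper, not part of the package):

    package main

    import "fmt"

    // emitOrder returns the order in which compress1xDo visits src indices.
    func emitOrder(src []byte) []int {
        var order []int
        n := len(src)
        n -= n & 3 // n is now the largest multiple of 4 <= len(src)
        for i := len(src) & 3; i > 0; i-- {
            order = append(order, n+i-1) // trailing 1-3 bytes, last first
        }
        for n -= 4; n >= 0; n -= 4 {
            // each quad is emitted as the pairs (3,2) then (1,0)
            order = append(order, n+3, n+2, n+1, n+0)
        }
        return order
    }

    func main() {
        fmt.Println(emitOrder(make([]byte, 10))) // [9 8 7 6 5 4 3 2 1 0]
    }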
- for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - err := bw.close() - return bw.out, err -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - var err error - idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. -func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - var errs [4]error - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. 
-func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. -func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. 
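Stepping back, minTableLog and optimalTableLog above clamp the table depth between what the alphabet requires and what the input length justifies. A self-contained restatement, assuming the huff0 defaults minTablelog=5 and tableLogMax=11 and the package's bits.Len32-based highBit32 helper:

    package main

    import (
        "fmt"
        "math/bits"
    )

    const (
        minTablelog = 5  // huff0 default (assumption of this sketch)
        tableLogMax = 11 // huff0 default (assumption of this sketch)
    )

    func highBit32(v uint32) uint32 { return uint32(bits.Len32(v) - 1) }

    func optimalTableLog(requested uint8, srcLen int, symbolLen uint16) uint8 {
        minBitsSrc := highBit32(uint32(srcLen)) + 1
        minBitsSymbols := highBit32(uint32(symbolLen-1)) + 2
        minBits := uint8(minBitsSymbols)
        if minBitsSrc < minBitsSymbols {
            minBits = uint8(minBitsSrc)
        }
        tableLog := requested
        if maxBitsSrc := uint8(highBit32(uint32(srcLen-1))) - 1; maxBitsSrc < tableLog {
            tableLog = maxBitsSrc // accuracy can be reduced for small inputs
        }
        if minBits > tableLog {
            tableLog = minBits // need a minimum to represent all symbols
        }
        if tableLog < minTablelog {
            tableLog = minTablelog
        }
        if tableLog > tableLogMax {
            tableLog = tableLogMax
        }
        return tableLog
    }

    func main() { fmt.Println(optimalTableLog(11, 4096, 64)) } // 10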
- huffNode0 := s.nodes[0 : huffNodesLen+1]
-
- for huffNode[nonNullRank].count == 0 {
- nonNullRank--
- }
-
- lowS := int16(nonNullRank)
- nodeRoot := nodeNb + lowS - 1
- lowN := nodeNb
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
- huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
- nodeNb++
- lowS -= 2
- for n := nodeNb; n <= nodeRoot; n++ {
- huffNode[n].count = 1 << 30
- }
- // fake entry, strong barrier
- huffNode0[0].count = 1 << 31
-
- // create parents
- for nodeNb <= nodeRoot {
- var n1, n2 int16
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
- n1 = lowS
- lowS--
- } else {
- n1 = lowN
- lowN++
- }
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
- n2 = lowS
- lowS--
- } else {
- n2 = lowN
- lowN++
- }
-
- huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
- huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
- nodeNb++
- }
-
- // distribute weights (unlimited tree height)
- huffNode[nodeRoot].nbBits = 0
- for n := nodeRoot - 1; n >= startNode; n-- {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
- }
- for n := uint16(0); n <= nonNullRank; n++ {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
- }
- s.actualTableLog = s.setMaxHeight(int(nonNullRank))
- maxNbBits := s.actualTableLog
-
- // fill result into tree (val, nbBits)
- if maxNbBits > tableLogMax {
- return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
- }
- var nbPerRank [tableLogMax + 1]uint16
- var valPerRank [16]uint16
- for _, v := range huffNode[:nonNullRank+1] {
- nbPerRank[v.nbBits]++
- }
- // determine starting value per rank
- {
- min := uint16(0)
- for n := maxNbBits; n > 0; n-- {
- // get starting value within each rank
- valPerRank[n] = min
- min += nbPerRank[n]
- min >>= 1
- }
- }
-
- // push nbBits per symbol, symbol order
- for _, v := range huffNode[:nonNullRank+1] {
- s.cTable[v.symbol].nBits = v.nbBits
- }
-
- // assign value within rank, symbol order
- t := s.cTable[:s.symbolLen]
- for n, val := range t {
- nbits := val.nBits & 15
- v := valPerRank[nbits]
- t[n].val = v
- valPerRank[nbits] = v + 1
- }
-
- return nil
-}
-
-// huffSort will sort symbols, decreasing order.
-func (s *Scratch) huffSort() {
- type rankPos struct {
- base uint32
- current uint32
- }
-
- // Clear nodes
- nodes := s.nodes[:huffNodesLen+1]
- s.nodes = nodes
- nodes = nodes[1 : huffNodesLen+1]
-
- // Sort into buckets based on length of symbol count.
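The canonical-code assignment at the end of buildCTable above deserves isolating: count symbols per bit length, derive each length's first code value, then hand out consecutive values in symbol order. A standalone sketch (assignCodes is illustrative; symbols with zero-length codes are not handled here):

    package main

    import "fmt"

    // assignCodes mirrors the nbPerRank/valPerRank logic in buildCTable.
    func assignCodes(nbBits []uint8, maxNbBits uint8) []uint16 {
        var nbPerRank [16]uint16
        for _, n := range nbBits {
            nbPerRank[n]++
        }
        // Starting value per rank, derived from the deepest rank upwards.
        var valPerRank [16]uint16
        min := uint16(0)
        for n := maxNbBits; n > 0; n-- {
            valPerRank[n] = min
            min += nbPerRank[n]
            min >>= 1
        }
        // Consecutive values within each rank, in symbol order.
        codes := make([]uint16, len(nbBits))
        for sym, n := range nbBits {
            codes[sym] = valPerRank[n]
            valPerRank[n]++
        }
        return codes
    }

    func main() {
        // symbols a,b,c,d with code lengths 1,2,3,3
        fmt.Println(assignCodes([]uint8{1, 2, 3, 3}, 3)) // [1 1 0 1]
    }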
- var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
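The debt accounting in setMaxHeight above, in numbers: demoting a leaf of depth d to maxNbBits frees baseCost - 2^(largestBits-d) units on a scale where a largestBits-deep leaf costs one, and in a valid tree the total is a multiple of baseCost (per the note in the source). A worked example with hand-picked depths:

    package main

    import "fmt"

    func main() {
        const largestBits, maxNbBits = 14, 11
        baseCost := 1 << (largestBits - maxNbBits) // 8
        totalCost := 0
        for _, d := range []int{14, 14, 13, 12} { // leaves deeper than maxNbBits
            totalCost += baseCost - 1<<(largestBits-d) // 7+7+6+4 = 24
        }
        totalCost >>= largestBits - maxNbBits // renormalize: 24/8
        fmt.Println(totalCost)                // 3 units to repay from shallower symbols
    }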
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].nbBits++ - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { - n-- - } - huffNode[n+1].nbBits-- - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].nbBits-- - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index c0c48bd..0000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1159 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, err - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - single[i] = d - } - *rank += length - } - - 
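The table header parsed at the top of ReadTable above comes in two shapes; in the direct shape (first byte >= 128) the weights are packed two 4-bit values per byte. Just that unpacking step, as a sketch (unpackWeights is illustrative; the FSE-compressed branch is out of scope here):

    package main

    import "fmt"

    func unpackWeights(hdr byte, in []byte) ([]uint8, []byte, error) {
        if hdr < 128 {
            return nil, nil, fmt.Errorf("FSE-compressed weights not handled in this sketch")
        }
        oSize := hdr - 127       // number of symbol weights
        iSize := (oSize + 1) / 2 // bytes consumed, two weights per byte
        if int(iSize) > len(in) {
            return nil, nil, fmt.Errorf("input too small for table")
        }
        w := make([]uint8, oSize+1) // +1 slack for odd counts, like huffWeight
        for n := uint8(0); n < oSize; n += 2 {
            v := in[n/2]
            w[n] = v >> 4   // high nibble: first weight
            w[n+1] = v & 15 // low nibble: second weight
        }
        return w[:oSize], in[iSize:], nil
    }

    func main() {
        w, rest, _ := unpackWeights(127+3, []byte{0x21, 0x30, 0xAA})
        fmt.Println(w, rest) // [2 1 3] [170]
    }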
return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 8 - 8 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
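One idiom recurs in every case of this switch: off is a uint8, so it wraps to zero exactly when 256 symbols have been staged, making the buffer-full check a single compare against zero. In isolation (a sketch, with byte(i) standing in for a decoded symbol):

    package main

    import "fmt"

    func main() {
        var buf [256]byte
        var off uint8
        var out []byte
        for i := 0; i < 600; i++ {
            buf[off] = byte(i) // stage one decoded symbol
            off++
            if off == 0 { // uint8 wrapped: exactly 256 bytes staged
                out = append(out, buf[:]...)
            }
        }
        out = append(out, buf[:off]...) // final partial buffer
        fmt.Println(len(out))           // 600
    }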
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
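For reference, the stream setup at the top of decompress4X8bit above: six header bytes hold three little-endian uint16 stream lengths, and the fourth stream is whatever remains of the block. The same split as a standalone helper (splitStreams is illustrative; the data is synthetic):

    package main

    import "fmt"

    func splitStreams(src []byte) ([4][]byte, error) {
        var streams [4][]byte
        start := 6
        for i := 0; i < 3; i++ {
            length := int(src[i*2]) | int(src[i*2+1])<<8
            if start+length >= len(src) {
                return streams, fmt.Errorf("truncated input (or invalid offset)")
            }
            streams[i] = src[start : start+length]
            start += length
        }
        streams[3] = src[start:] // last length is implied
        return streams, nil
    }

    func main() {
        // jump table: lengths 2, 3, 1; then 2+3+1+2 payload bytes
        src := []byte{2, 0, 3, 0, 1, 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
        s, err := splitStreams(src)
        fmt.Println(err, len(s[0]), len(s[1]), len(s[2]), len(s[3])) // <nil> 2 3 1 2
    }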
- const stream = 0
- const stream2 = 1
- br1 := &br[stream]
- br2 := &br[stream2]
- br1.fillFast()
- br2.fillFast()
-
- v := single[uint8(br1.value>>shift)].entry
- v2 := single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off] = uint8(v >> 8)
- buf[stream2][off] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+1] = uint8(v >> 8)
- buf[stream2][off+1] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+2] = uint8(v >> 8)
- buf[stream2][off+2] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+3] = uint8(v >> 8)
- buf[stream2][off+3] = uint8(v2 >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br1 := &br[stream]
- br2 := &br[stream2]
- br1.fillFast()
- br2.fillFast()
-
- v := single[uint8(br1.value>>shift)].entry
- v2 := single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off] = uint8(v >> 8)
- buf[stream2][off] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+1] = uint8(v >> 8)
- buf[stream2][off+1] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+2] = uint8(v >> 8)
- buf[stream2][off+2] = uint8(v2 >> 8)
-
- v = single[uint8(br1.value>>shift)].entry
- v2 = single[uint8(br2.value>>shift)].entry
- br1.bitsRead += uint8(v)
- br1.value <<= v & 63
- br2.bitsRead += uint8(v2)
- br2.value <<= v2 & 63
- buf[stream][off+3] = uint8(v >> 8)
- buf[stream2][off+3] = uint8(v2 >> 8)
- }
-
- off += 4
-
- if off == 0 {
- if bufoff > dstEvery {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[0][:off])
- copy(out[dstEvery:], buf[1][:off])
- copy(out[dstEvery*2:], buf[2][:off])
- copy(out[dstEvery*3:], buf[3][:off])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
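The tail loop that follows drains each stream one symbol at a time once fewer than four input bytes remain, topping the bit buffer up byte by byte. That refill step in isolation (a sketch mirroring the inlined code, not the vendored bitReaderBytes):

    package main

    import "fmt"

    type bitReader struct {
        in       []byte
        off      int // next unread position; input is consumed backwards
        value    uint64
        bitsRead uint8
    }

    // refill lazily tops up the shift register, one byte at a time,
    // only once the buffer is nearly drained (like the inlined code).
    func (br *bitReader) refill() {
        if br.bitsRead < 56 {
            return
        }
        for br.off > 0 && br.bitsRead >= 8 {
            br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
            br.bitsRead -= 8
            br.off--
        }
    }

    func main() {
        br := &bitReader{in: []byte{0xAB, 0xCD}, off: 2, bitsRead: 64}
        br.refill()
        fmt.Printf("%#x %d\n", br.value, br.bitsRead) // 0xcdab000000000000 48
    }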
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4)
- for i := range br {
- offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
- br := &br[i]
- bitsLeft := br.remaining()
- for bitsLeft > 0 {
- if br.finished() {
- d.bufs.Put(buf)
- return nil, io.ErrUnexpectedEOF
- }
- if br.bitsRead >= 56 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value |= uint64(low) << (br.bitsRead - 32)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- // end inline...
- if offset >= endsAt {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- v := single[br.peekByteFast()].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- if offset != endsAt {
- d.bufs.Put(buf)
- return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
- }
-
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- d.bufs.Put(buf)
- return nil, err
- }
- }
- d.bufs.Put(buf)
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
-// matches will compare a decoding table to a coding table.
-// Errors are written to the writer.
-// Nothing will be written if table is ok.
-func (s *Scratch) matches(ct cTable, w io.Writer) {
- if s == nil || len(s.dt.single) == 0 {
- return
- }
- dt := s.dt.single[:1<<s.actualTableLog]
- tablelog := s.actualTableLog
- ok := 0
- broken := 0
- for sym, enc := range ct {
- errs := 0
- broken++
- if enc.nBits == 0 {
- for _, dec := range dt {
- if uint8(dec.entry>>8) == byte(sym) {
- fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
- errs++
- break
- }
- }
- if errs == 0 {
- broken--
- }
- continue
- }
- // Unused bits in input
- ub := tablelog - enc.nBits
- top := enc.val << ub
- // decoder looks at top bits.
- dec := dt[top]
- if uint8(dec.entry) != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
- errs++
- }
- if uint8(dec.entry>>8) != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
- errs++
- }
- if errs > 0 {
- fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
- continue
- }
- // Ensure that all combinations are covered.
- for i := uint16(0); i < (1 << ub); i++ {
- vval := top | i
- dec := dt[vval]
- if uint8(dec.entry) != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
- errs++
- }
- if uint8(dec.entry>>8) != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
- errs++
- }
- if errs > 20 {
- fmt.Fprintf(w, "%d errors, stopping\n", errs)
- break
- }
- }
- if errs == 0 {
- ok++
- broken--
- }
- }
- if broken > 0 {
- fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
- }
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
deleted file mode 100644
index 9f3e9f7..0000000
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ /dev/null
@@ -1,222 +0,0 @@
-//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
-
-// This file contains the specialisation of Decoder.Decompress4X
-// and Decoder.Decompress1X that use an asm implementation of their main loops.
-package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. 
- val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index dd1a5ae..0000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,847 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
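Decompress1X above selects its kernel per call via cpuinfo.HasBMI2(). The same dispatch pattern in generic form; golang.org/x/sys/cpu stands in here for the internal cpuinfo package, and the sum kernels are stand-ins (assumptions of this sketch):

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func sumGeneric(xs []int) int {
        s := 0
        for _, x := range xs {
            s += x
        }
        return s
    }

    // sumBMI2 stands in for a kernel built for BMI2-capable CPUs.
    func sumBMI2(xs []int) int { return sumGeneric(xs) }

    func main() {
        // Pick the kernel once, based on a cached capability probe.
        kernel := sumGeneric
        if cpu.X86.HasBMI2 {
            kernel = sumBMI2
        }
        fmt.Println(kernel([]int{1, 2, 3})) // 6
    }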
- -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 - - // Main loop -main_loop: - MOVQ SI, R8 - CMPQ R8, BX - SETGE DL - - // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ (R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 - - // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 48(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 - - // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 96(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * 
dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 - - // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 - SUBQ $0x04, AX - MOVQ 144(R11), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 - - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 - - // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) - - // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 - - // Main loop -main_loop: - MOVQ BX, R8 - CMPQ R8, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 - - // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill1 - MOVQ 
72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 - - // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 - - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 - - // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 - JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ 
R15, R12 - - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 - MOVQ DI, CX - SHRQ CL, R14 - - // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) - - // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exeeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exeeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB 
error_max_decoded_size_exeeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exeeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exeeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 4f6f37c..0000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,295 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. 
-// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index e8ad17a..0000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. - // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. 
-func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. -func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51..0000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
-func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579..0000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe..0000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a4..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. 
-func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. 
-func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. 
Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. 
This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. 
-func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. 
-func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 511bba6..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. 
-// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. 
Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4..0000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. 
- - Otherwise, let n be the little-endian unsigned integer denoted by the next
-   m - 59 bytes. The next 1 + n bytes after that are literal bytes.
-
-For copy tags, length bytes are copied from offset bytes ago, in the style of
-Lempel-Ziv compression algorithms. In particular:
- - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
-   The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
-   of the offset. The next byte is bits 0-7 of the offset.
- - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
-   The length is 1 + m. The offset is the little-endian unsigned integer
-   denoted by the next 2 bytes.
- - For l == 3, this tag is a legacy format that is no longer issued by most
-   encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
-   [1, 65). The length is 1 + m. The offset is the little-endian unsigned
-   integer denoted by the next 4 bytes.
-*/
-const (
-	tagLiteral = 0x00
-	tagCopy1   = 0x01
-	tagCopy2   = 0x02
-	tagCopy4   = 0x03
-)
-
-const (
-	checksumSize    = 4
-	chunkHeaderSize = 4
-	magicChunk      = "\xff\x06\x00\x00" + magicBody
-	magicBody       = "sNaPpY"
-
-	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
-	// part of the wire format per se, but some parts of the encoder assume
-	// that an offset fits into a uint16.
-	//
-	// Also, for the framing format (Writer type instead of Encode function),
-	// https://github.com/google/snappy/blob/master/framing_format.txt says
-	// that "the uncompressed data in a chunk must be no longer than 65536
-	// bytes".
-	maxBlockSize = 65536
-
-	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
-	// hard coded to be a const instead of a variable, so that obufLen can also
-	// be a const. Their equivalence is confirmed by
-	// TestMaxEncodedLenOfMaxBlockSize.
-	maxEncodedLenOfMaxBlockSize = 76490
-
-	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
-	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
-)
-
-const (
-	chunkTypeCompressedData   = 0x00
-	chunkTypeUncompressedData = 0x01
-	chunkTypePadding          = 0xfe
-	chunkTypeStreamIdentifier = 0xff
-)
-
-var crcTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc implements the checksum specified in section 3 of
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func crc(b []byte) uint32 {
-	c := crc32.Update(0, crcTable, b)
-	return uint32(c>>15|c<<17) + 0xa282ead8
-}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
deleted file mode 100644
index 2263853..0000000
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ /dev/null
@@ -1,4 +0,0 @@
-module github.com/klauspost/compress
-
-go 1.16
-
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
deleted file mode 100644
index e69de29..0000000
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
deleted file mode 100644
index beb7fa8..0000000
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ /dev/null
@@ -1,439 +0,0 @@
-# zstd
-
-[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
-It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
-This package implements a high performance compression algorithm, currently focused on speed.
-
-This package provides [compression](#Compressor) and [decompression](#Decompressor) of Zstandard content.
-
-This package is pure Go and without use of "unsafe".
-
-The `zstd` package is provided as open source software using a Go standard license.
-
-Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
-
-## Installation
-
-Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
-
-## Compressor
-
-### Status:
-
-STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
-used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
-
-There may still be specific combinations of data types/size/settings that could lead to edge cases,
-so as always, testing is recommended.
-
-For now, high speed (fastest) and medium-fast (default) compressors have been implemented.
-
-* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
-* The "Better" compression ratio is roughly equivalent to zstd level 7.
-* The "Best" compression ratio is roughly equivalent to zstd level 11.
-
-In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
-The compression ratio compared to stdlib is around level 3, while usually being 3x as fast.
-
-### Usage
-
-An Encoder can be used for either compressing a stream via the
-`io.WriteCloser` interface supported by the Encoder or as multiple independent
-tasks via the `EncodeAll` function.
-For smaller encodes, using the `EncodeAll` function is encouraged.
-Use `NewWriter` to create a new instance that can be used for both.
-
-To create a writer with default options, do something like this:
-
-```Go
-// Compress input to output.
-func Compress(in io.Reader, out io.Writer) error {
-	enc, err := zstd.NewWriter(out)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(enc, in)
-	if err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```
-
-Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
-Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
-
-The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
-
-To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
-This will allow the encoder to reuse all resources and avoid wasteful allocations.
-
-Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
-of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
-in the future. So if you want to limit concurrency for future updates, specify the concurrency
-you would like.
-
-If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
-which will compress input as each block is completed, blocking on writes until each has completed.
-
-You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
-compression settings can be specified.
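-
-A minimal sketch of passing a level to `NewWriter` (assuming the predefined
-level constants such as `zstd.SpeedBetterCompression`; the function name is
-illustrative only):
-
-```Go
-// CompressBetter compresses input to output at a higher, slower level.
-func CompressBetter(in io.Reader, out io.Writer) error {
-	// WithEncoderLevel selects one of the pre-defined compression settings.
-	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(enc, in)
-	if err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```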
-
-#### Future Compatibility Guarantees
-
-This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
-
-The goal will be to keep the default efficiency at the default zstd (level 3).
-However, the encoding should never be assumed to remain the same,
-and you should not use hashes of compressed output for similarity checks.
-
-The Encoder can be assumed to produce the same output from the exact same code version.
-However, there may be modes in the future that break this,
-although they will not be enabled without an explicit option.
-
-This encoder is not designed to (and probably never will) output the exact same bitstream as the reference encoder.
-
-Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
-[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
-and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
-
-#### Blocks
-
-For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
-
-`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently.
-Each call will only run on the same goroutine as the caller.
-
-Encoded blocks can be concatenated and the result will be the combined input stream.
-Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
-
-Especially when encoding blocks you should take care to reuse the encoder.
-This will effectively make it run without allocations after a warmup period.
-To make it run completely without allocations, supply a destination buffer with space for all content.
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a writer that caches compressors.
-// For this operation type we supply a nil Writer.
-var encoder, _ = zstd.NewWriter(nil)
-
-// Compress a buffer.
-// If you have a destination buffer, the allocation in the call can also be eliminated.
-func Compress(src []byte) []byte {
-	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
-}
-```
-
-You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
-option when creating the writer.
-
-Using the Encoder for both a stream and individual blocks concurrently is safe.
-
-### Performance
-
-I have collected some examples to compare speed and compression against other compressors.
-
-* `file` is the input file.
-* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` are standard gzip and this library.
-* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default", 3 is "better", 4 is "best".
-* `insize`/`outsize` is the input/output size.
-* `millis` is the number of milliseconds used for compression.
-* `mb/s` is megabytes (2^20 bytes) per second.
- -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. 
-https://files.klauspost.com/compress/github-june-2days-2019.json.zst
-
-file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
-github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
-github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
-github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
-
-github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
-github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
-github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
-github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-
-github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
-github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
-
-VM Image, Linux mint with a few installed applications:
-https://files.klauspost.com/compress/rawstudio-mint14.7z
-
-file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
-rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
-rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
-rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
-
-rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
-rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
-rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
-rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-
-rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
-rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
-
-CSV data:
-https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
-
-file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
-nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
-nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
-nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
-
-nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
-nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
-nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
-nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
-```
-
-## Decompressor
-
-Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
-
-This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
-with infrastructure kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
-The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
-or run it past its limits with ANY input provided.
-
-### Usage
-
-The package has been designed for two main usages, big streams of data and smaller in-memory buffers.
-Both of these are accessed by creating a `Decoder`.
-
-For streaming use, a simple setup could look like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-func Decompress(in io.Reader, out io.Writer) error {
-	d, err := zstd.NewReader(in)
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-
-	// Copy content...
-	_, err = io.Copy(out, d)
-	return err
-}
-```
-
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
-when running with default settings.
-Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
-
-Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
-However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
-only as it is being requested.
-
-For decoding buffers, it could look something like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a reader that caches decompressors.
-// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
-
-// Decompress a buffer. We don't supply a destination buffer,
-// so it will be allocated by the decoder.
-func Decompress(src []byte) ([]byte, error) {
-	return decoder.DecodeAll(src, nil)
-}
-```
-
-Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
-By default 4 decompressors will be created.
-
-It will only allow a certain number of concurrent operations to run.
-To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
-It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
-
-### Dictionaries
-
-Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
-
-Dictionaries are added individually to Decoders.
-Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
-To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
-Several dictionaries can be added at once.
-
-The dictionaries will be used automatically for the data that specifies them.
-A re-used Decoder will still contain the dictionaries registered.
-
-When registering multiple dictionaries with the same ID, the last one will be used.
-
-It is possible to use dictionaries when compressing data.
-
-To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
-and it will likely be used even if it doesn't improve compression.
-
-The same dictionary must be used to decompress the content.
-
-For any real gains, the dictionary should be built with similar data.
-If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
-Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
-For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
-
-For now there is a fixed startup performance penalty for compressing content with dictionaries.
-This will likely be improved over time, so be sure to test performance when implementing.
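-
-A minimal sketch of using the same dictionary on both the encoder and the decoder,
-assuming `dict` holds raw dictionary data, e.g. produced by `zstd --train` (the
-helper name is illustrative only):
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// NewDictCodec returns an encoder and a decoder sharing one dictionary.
-// dict is assumed to be raw dictionary data, e.g. from `zstd --train`.
-func NewDictCodec(dict []byte) (*zstd.Encoder, *zstd.Decoder, error) {
-	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
-	if err != nil {
-		return nil, nil, err
-	}
-	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
-	if err != nil {
-		enc.Close()
-		return nil, nil, err
-	}
-	return enc, dec, nil
-}
-```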
-
-### Allocation-less operation
-
-The decoder has been designed to operate without allocations after a warmup.
-
-This means that you should *store* the decoder for best performance.
-To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream.
-A decoder can safely be re-used even if the previous stream failed.
-
-To release the resources, you must call the `Close()` function on a decoder.
-After this it can *no longer be reused*, but all running goroutines will be stopped.
-So you *must* use this if you will no longer need the Reader.
-
-For decompressing smaller buffers a single decoder can be used.
-When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
-In this case no unneeded allocations should be made.
-
-### Concurrency
-
-The buffer decoder does everything on the same goroutine and does nothing concurrently.
-It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-
-The stream decoder will create goroutines that:
-
-1) Read input and split it into blocks.
-2) Decompress literals.
-3) Decompress sequences.
-4) Reconstruct the output stream.
-
-So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
-
-The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
-
-Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-
-In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
-
-### Benchmarks
-
-The first two are streaming decodes and the last are smaller inputs.
-
-Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
-
-```
-BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
-BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
-
-Concurrent blocks, performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
-```
-
-This reflects the performance around May 2022, but this may be out of date.
-
-## Zstd inside ZIP files
-
-It is possible to use zstandard to compress individual files inside zip archives.
-While this isn't widely supported, it can be useful for internal files.
-
-To support the compression and decompression of these files, you must register a compressor and decompressor.
-
-It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
-use the global registration functions. The main reason for this is that two registrations from
-different packages will result in a panic.
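-
-A minimal sketch of per-archive registration (assuming the zip helpers exported
-by this package, `zstd.ZipCompressor` and `zstd.ZipMethodWinZip`; the function
-name is illustrative only):
-
-```Go
-import (
-	"archive/zip"
-	"io"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-// AddCompressed adds name to zw, compressed with zstd.
-func AddCompressed(zw *zip.Writer, name string, content io.Reader) error {
-	// Register on this writer only, not globally.
-	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
-	w, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zstd.ZipMethodWinZip})
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(w, content)
-	return err
-}
-```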
- -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 97299d4..0000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 78b3c61..0000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. -func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. 
-// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 7eed729..0000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,721 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. 
-	WindowSize uint64
-
-	err error
-
-	// Check against this crc
-	checkCRC []byte
-
-	// Frame to use for singlethreaded decoding.
-	// Should not be used by the decoder itself since parent may be another frame.
-	localFrame *frameDec
-
-	sequence []seqVals
-
-	async struct {
-		newHist  *history
-		literals []byte
-		seqData  []byte
-		seqSize  int // Size of uncompressed sequences
-		fcs      uint64
-	}
-
-	// Block is RLE, this is the size.
-	RLESize uint32
-
-	Type blockType
-
-	// Is this the last block of a frame?
-	Last bool
-
-	// Use less memory
-	lowMem bool
-}
-
-func (b *blockDec) String() string {
-	if b == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
-}
-
-func newBlockDec(lowMem bool) *blockDec {
-	b := blockDec{
-		lowMem: lowMem,
-	}
-	return &b
-}
-
-// reset will reset the block.
-// Input must be a start of a block and will be at the end of the block when returned.
-func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
-	b.WindowSize = windowSize
-	tmp, err := br.readSmall(3)
-	if err != nil {
-		println("Reading block header:", err)
-		return err
-	}
-	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
-	b.Last = bh&1 != 0
-	b.Type = blockType((bh >> 1) & 3)
-	// find size.
-	cSize := int(bh >> 3)
-	maxSize := maxCompressedBlockSizeAlloc
-	switch b.Type {
-	case blockTypeReserved:
-		return ErrReservedBlockType
-	case blockTypeRLE:
-		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
-			if debugDecoder {
-				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
-			}
-			return ErrWindowSizeExceeded
-		}
-		b.RLESize = uint32(cSize)
-		if b.lowMem {
-			maxSize = cSize
-		}
-		cSize = 1
-	case blockTypeCompressed:
-		if debugDecoder {
-			println("Data size on stream:", cSize)
-		}
-		b.RLESize = 0
-		maxSize = maxCompressedBlockSizeAlloc
-		if windowSize < maxCompressedBlockSize && b.lowMem {
-			maxSize = int(windowSize) + compressedBlockOverAlloc
-		}
-		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
-			if debugDecoder {
-				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
-			}
-			return ErrCompressedSizeTooBig
-		}
-		// Empty compressed blocks must at least be 2 bytes
-		// for Literals_Block_Type and one for Sequences_Section_Header.
-		if cSize < 2 {
-			return ErrBlockTooSmall
-		}
-	case blockTypeRaw:
-		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
-			if debugDecoder {
-				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
-			}
-			return ErrWindowSizeExceeded
-		}
-
-		b.RLESize = 0
-		// We do not need a destination for raw blocks.
-		maxSize = -1
-	default:
-		panic("Invalid block type")
-	}
-
-	// Read block data.
-	if cap(b.dataStorage) < cSize {
-		if b.lowMem || cSize > maxCompressedBlockSize {
-			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
-		} else {
-			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
-		}
-	}
-	if cap(b.dst) <= maxSize {
-		b.dst = make([]byte, 0, maxSize+1)
-	}
-	b.data, err = br.readBig(cSize, b.dataStorage)
-	if err != nil {
-		if debugDecoder {
-			println("Reading block:", err, "(", cSize, ")", len(b.data))
-			printf("%T", br)
-		}
-		return err
-	}
-	return nil
-}
-
-// sendErr will make the decoder report err on this frame.
-func (b *blockDec) sendErr(err error) {
-	b.Last = true
-	b.Type = blockTypeReserved
-	b.err = err
-}
-
-// Close will release resources.
-// Closed blockDec cannot be reset.
-func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil - } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - 
} - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 12e8f6f..0000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. 
-func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. 
-func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%x, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// popOffsets will restore the recent offsets from the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%.
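- // Per the zstd format, offset values 1-3 are repeat codes referring to the three most recent offsets, and the table shifts when a new offset is pushed. When the sequence carries no literals, code 1 refers to recentOffsets[1] instead, which is why the two branches below differ. For example (illustrative), with recentOffsets = [8, 4, 1]: offset 8 with literals is emitted as 1, offset 4 with no literals is also emitted as 1, and any other offset is emitted as offset+3.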
- if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRawTo can be used to append a raw representation of the supplied bytes to dst. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed...
- // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... 
- if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). - nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... - for seq >= 0 { - s = b.sequences[seq] - - ofB := ofTT[s.ofCode] - wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. - //of.encode(ofB) - nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 - dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) - wr.addBits16NC(of.state, uint8(nbBitsOut)) - of.state = of.stateTable[dstState] - - // Accumulate extra bits. 
- outBits := ofB.outBits & 31 - extraBits := uint64(s.offset & bitMask32[outBits]) - extraBitsN := outBits - - mlB := mlTT[s.mlCode] - //ml.encode(mlB) - nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 - dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) - wr.addBits16NC(ml.state, uint8(nbBitsOut)) - ml.state = ml.stateTable[dstState] - - outBits = mlB.outBits & 31 - extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits]) - extraBitsN += outBits - - llB := llTT[s.llCode] - //ll.encode(llB) - nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16 - dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) - wr.addBits16NC(ll.state, uint8(nbBitsOut)) - ll.state = ll.stateTable[dstState] - - outBits = llB.outBits & 31 - extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits]) - extraBitsN += outBits - - wr.flush32() - wr.addBits64NC(extraBits, extraBitsN) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - ml.flush(mlEnc.actualTableLog) - of.flush(ofEnc.actualTableLog) - ll.flush(llEnc.actualTableLog) - err = wr.close() - if err != nil { - return err - } - b.output = wr.out - - if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. - b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible - } - - // Size is output minus block header. - bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debugEncoder { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram() - ofH := b.coders.ofEnc.Histogram() - mlH := b.coders.mlEnc.Histogram() - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i := range b.sequences { - seq := &b.sequences[i] - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e4..0000000 --- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again.
- var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 2ad0207..0000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" - "io/ioutil" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. 
- skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(ioutil.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a24..0000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream by n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset.
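-// If fewer than 4 bytes remain, the trailing bytes are combined into a best-effort value by the rare-path branch below rather than reading out of bounds.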
-func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must ensure that there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 5022e71..0000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 and 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. - // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated.
- // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - *h = Header{} - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch size { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch fcsSize { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
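- // For example, a stored value of 0x0000 decodes to 256 and 0xFFFF to 65791; sizes 0-255 already fit the 1-byte form, so the 2-byte form starts at 256.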
- h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index d212f47..0000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,924 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - } - - frame *frameDec - - // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. 
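-// -// A minimal streaming use (illustrative sketch; assumes src is an io.Reader positioned at zstd data and dst is an io.Writer): -// -// dec, err := zstd.NewReader(src) -// if err != nil { -// return err -// } -// defer dec.Close() -// _, err = io.Copy(dst, dec)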
-// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder to the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If the input is a bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b - } - - dst, err := d.DecodeAll(b, dst[:0]) - if err == nil { - err = io.EOF - } - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - - // Ensure no-one else is still running...
- d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
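- // The decoders channel doubles as a semaphore and a free list: receiving removes a decoder from circulation, which bounds concurrent DecodeAll calls at d.o.concurrent, and the deferred send below returns it for reuse.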
- block := <-d.decoders - frame := block.localFrame - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if !d.o.ignoreChecksum && len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } - } - if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { - got := d.current.crc.Sum64() - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { - if debugDecoder { - println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - println("CRC ok", tmp[:]) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err != nil { - return false - } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = 
nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. -func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
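- // This is stage 2 in the numbering above: blocks arrive with their sequences already decoded, and this goroutine materializes the output (RLE and raw copies, or executeSequences for compressed blocks), maintains the history window, and enforces the frame content size before sending downstream.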
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - }() - -decodeStream: - for { - var hist history - var hasErr bool - - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - err = 
ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.checkCRC = nil - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - dec.err = err - } - var tmp [4]byte - copy(tmp[:], crc) - dec.checkCRC = tmp[:] - if debugDecoder { - println("found crc to check:", dec.checkCRC) - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - d.frame.history.b = frameHistCache -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index c70e6fa..0000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict - ignoreChecksum bool -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. -// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
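-// -// For example (illustrative sketch): -// -// dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(1)) // fully synchronous decoding, no async goroutines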
-func WithDecoderConcurrency(n int) DOption {
-	return func(o *decoderOptions) error {
-		if n < 0 {
-			return errors.New("concurrency cannot be negative")
-		}
-		if n == 0 {
-			o.concurrent = runtime.GOMAXPROCS(0)
-		} else {
-			o.concurrent = n
-		}
-		return nil
-	}
-}
-
-// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
-// non-streaming operations, or a maximum window size for streaming operations.
-// This can be used to control memory usage of potentially hostile content.
-// Maximum is 1 << 63 bytes. Default is 64GiB.
-func WithDecoderMaxMemory(n uint64) DOption {
-	return func(o *decoderOptions) error {
-		if n == 0 {
-			return errors.New("WithDecoderMaxMemory must be at least 1")
-		}
-		if n > 1<<63 {
-			return errors.New("WithDecoderMaxMemory must be less than 1 << 63")
-		}
-		o.maxDecodedSize = n
-		return nil
-	}
-}
-
-// WithDecoderDicts allows registering one or more dictionaries for the decoder.
-// If several dictionaries with the same ID are provided, the last one will be used.
-func WithDecoderDicts(dicts ...[]byte) DOption {
-	return func(o *decoderOptions) error {
-		for _, b := range dicts {
-			d, err := loadDict(b)
-			if err != nil {
-				return err
-			}
-			o.dicts = append(o.dicts, *d)
-		}
-		return nil
-	}
-}
-
-// WithDecoderMaxWindow allows setting a maximum window size for decodes.
-// This allows rejecting input that would otherwise require excessive memory.
-// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
-// If WithDecoderMaxMemory is set to a lower value, that will be used.
-// Default is 512MB; the maximum is ~3.75 TB, per the zstandard spec.
-func WithDecoderMaxWindow(size uint64) DOption {
-	return func(o *decoderOptions) error {
-		if size < MinWindowSize {
-			return errors.New("WithDecoderMaxWindow must be at least MinWindowSize (1KB, 1024 bytes)")
-		}
-		if size > (1<<41)+7*(1<<38) {
-			return errors.New("WithDecoderMaxWindow must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
-		}
-		o.maxWindowSize = size
-		return nil
-	}
-}
-
-// IgnoreChecksum allows forcibly ignoring checksum verification.
-func IgnoreChecksum(b bool) DOption {
-	return func(o *decoderOptions) error {
-		o.ignoreChecksum = b
-		return nil
-	}
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
deleted file mode 100644
index a36ae83..0000000
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package zstd
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-
-	"github.com/klauspost/compress/huff0"
-)
-
-type dict struct {
-	id uint32
-
-	litEnc              *huff0.Scratch
-	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
-	offsets [3]int
-	content []byte
-}
-
-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
-
-// ID returns the dictionary id or 0 if d is nil.
-func (d *dict) ID() uint32 {
-	if d == nil {
-		return 0
-	}
-	return d.id
-}
-
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
-	if d == nil {
-		return 0
-	}
-	return len(d.content)
-}
-
-// Load a dictionary as described in
-// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
-func loadDict(b []byte) (*dict, error) {
-	// Check static field size.
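-	// Per the zstd dictionary format, the payload is: a 4-byte magic
-	// number, a 4-byte little-endian dictionary ID, entropy tables (a
-	// Huffman table for literals plus three FSE tables), three 4-byte
-	// repeat offsets, and finally the raw content. Magic+ID (8 bytes)
-	// and the repeat offsets (12 bytes) are the fixed-size minimum
-	// checked here.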
- if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if !bytes.Equal(b[:4], dictMagic[:]) { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, err - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. - dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 15ae8ee..0000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,188 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc {
-	return e.blk
-}
-
-func (e *fastBase) addBlock(src []byte) int32 {
-	if debugAsserts && e.cur > bufferReset {
-		panic(fmt.Sprintf("e.cur (%d) > bufferReset (%d)", e.cur, bufferReset))
-	}
-	// Check if we have space already.
-	if len(e.hist)+len(src) > cap(e.hist) {
-		if cap(e.hist) == 0 {
-			e.ensureHist(len(src))
-		} else {
-			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
-				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
-			}
-			// Move down
-			offset := int32(len(e.hist)) - e.maxMatchOff
-			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
-			e.cur += offset
-			e.hist = e.hist[:e.maxMatchOff]
-		}
-	}
-	s := int32(len(e.hist))
-	e.hist = append(e.hist, src...)
-	return s
-}
-
-// ensureHist will ensure that history can keep at least this many bytes.
-func (e *fastBase) ensureHist(n int) {
-	if cap(e.hist) >= n {
-		return
-	}
-	l := e.maxMatchOff
-	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
-		l += maxCompressedBlockSize
-	} else {
-		l += e.maxMatchOff
-	}
-	// Make it at least 1MB.
-	if l < 1<<20 && !e.lowMem {
-		l = 1 << 20
-	}
-	// Make it at least the requested size.
-	if l < int32(n) {
-		l = int32(n)
-	}
-	e.hist = make([]byte, 0, l)
-}
-
-// UseBlock will replace the block with the provided one,
-// but transfer recent offsets from the previous.
-func (e *fastBase) UseBlock(enc *blockEnc) {
-	enc.reset(e.blk)
-	e.blk = enc
-}
-
-func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
-	if debugAsserts {
-		if s < 0 {
-			err := fmt.Sprintf("s (%d) < 0", s)
-			panic(err)
-		}
-		if t < 0 {
-			err := fmt.Sprintf("t (%d) < 0", t)
-			panic(err)
-		}
-		if s-t > e.maxMatchOff {
-			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
-			panic(err)
-		}
-		if len(src)-int(s) > maxCompressedBlockSize {
-			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
-		}
-	}
-	a := src[s:]
-	b := src[t:]
-	b = b[:len(a)]
-	end := int32((len(a) >> 3) << 3)
-	for i := int32(0); i < end; i += 8 {
-		if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
-			return i + int32(bits.TrailingZeros64(diff)>>3)
-		}
-	}
-
-	a = a[end:]
-	b = b[end:]
-	for i := range a {
-		if a[i] != b[i] {
-			return int32(i) + end
-		}
-	}
-	return int32(len(a)) + end
-}
-
-// Reset the encoding table.
-func (e *fastBase) resetBase(d *dict, singleBlock bool) {
-	if e.blk == nil {
-		e.blk = &blockEnc{lowMem: e.lowMem}
-		e.blk.init()
-	} else {
-		e.blk.reset(nil)
-	}
-	e.blk.initNewEncode()
-	if e.crc == nil {
-		e.crc = xxhash.New()
-	} else {
-		e.crc.Reset()
-	}
-	if d != nil {
-		low := e.lowMem
-		if singleBlock {
-			e.lowMem = true
-		}
-		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
-		e.lowMem = low
-	}
-
-	// We offset current position so everything will be out of reach.
-	// If above reset line, history will be purged.
-	if e.cur < bufferReset {
-		e.cur += e.maxMatchOff + int32(len(e.hist))
-	}
-	e.hist = e.hist[:0]
-	if d != nil {
-		// Set offsets (currently not used)
-		for i, off := range d.offsets {
-			e.blk.recentOffsets[i] = uint32(off)
-			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
-		}
-		// Transfer the literal encoder.
-		e.blk.dictLitEnc = d.litEnc
-		e.hist = append(e.hist, d.content...)
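-		// At this point the dictionary content is the entire history
-		// window, so the first block can match against it directly.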
- } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 96028ec..0000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = 25000 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep)) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
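-		// Entries older than minOff fall outside the window and are
-		// zeroed; the rest are translated by (maxMatchOff - e.cur) so the
-		// window restarts at maxMatchOff. This keeps e.cur from
-		// overflowing without discarding usable match history.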
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - _ = addLiterals - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { - return a - } - return b - } - const goodEnough = 100 - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} - } - if debugAsserts { - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m - } - - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) - - if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, 
matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - continue - } - - s++ - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - if false { - // Short at s+3. - // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) - } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) - } - best = bestEnd - } - } - } - - if debugAsserts { - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - if best.rep > 0 { - s = best.s - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = uint32(best.rep) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - - } - break encodeLoop - } - // Index skipped... 
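-			// Positions covered by the match were never hashed; index them
-			// now so later searches can find matches starting inside it.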
- off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2: - offset1, offset2 = offset2, offset1 - case 3: - offset1, offset2, offset3 = offset3, offset1, offset2 - } - cv = load6432(src, s) - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index c769f69..0000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1237 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
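-		// Only the literal count is recorded on the sequence; the bytes
-		// themselves live in the block's shared literal buffer.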
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... 
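-				// (With zero literals, the repeat-offset codes shift meaning
-				// in the zstd format, which would need extra handling here.)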
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. 
We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
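-		// Bytes after the final sequence are carried as extra literals,
-		// emitted with the block but not attached to any sequence.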
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
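-			// Only a few shards are dirty: restore just those from the
-			// dictionary snapshot instead of copying the whole long table.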
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index 7ff0c64..0000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1124 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
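Editor's note: the `dFastLongLen`/`dFastShortLen` constants above ("Bytes used for table hash") feed the `load6432`/`hashLen` pair that drives every table lookup in these encoders. As a rough sketch of how such a scheme works (the prime constant and generic shift here are illustrative, not the vendored implementation):

```go
// Hash the low l bytes of an unaligned 64-bit read into a table index
// of `bits` bits: shift the bytes of interest to the top, multiply by
// a large odd constant, keep the high bits.
package main

import (
	"encoding/binary"
	"fmt"
)

const prime8bytes = 0xcf1bbcdcb7a56463 // illustrative xxHash-style prime

// load6432 reads 8 bytes little-endian starting at position i.
func load6432(b []byte, i int32) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}

// hashLen hashes the lowest l bytes of u into a value of `bits` bits.
func hashLen(u uint64, bits, l uint8) uint32 {
	u <<= 64 - 8*l // keep only the low l bytes, moved to the top
	return uint32((u * prime8bytes) >> (64 - bits))
}

func main() {
	src := []byte("hello, zstd world!")
	cv := load6432(src, 0)
	fmt.Printf("short hash: %d, long hash: %d\n",
		hashLen(cv, 15, 5), hashLen(cv, 17, 8))
}
```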
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
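Editor's note: the `s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))` line in the loop above is the encoder's acceleration heuristic: the longer it goes without finding a match, the more bytes it skips per iteration, trading ratio for speed on poorly compressible data. A small sketch of the schedule (function name hypothetical, constants taken from the hunk above):

```go
package main

import "fmt"

// nextStep returns how far the search advances given the current
// position s and the last emit point nextEmit.
func nextStep(s, nextEmit int32) int32 {
	const stepSize = 1
	const kSearchStrength = 8
	return stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
}

func main() {
	// Step size after 0, 128, 512 and 2048 unmatched bytes:
	for _, gap := range []int32{0, 128, 512, 2048} {
		fmt.Println(gap, "->", nextStep(gap, 0)) // 1, 2, 5, 17
	}
}
```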
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
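Editor's note: the sequence bookkeeping above never stores a raw distance in `seq.offset`. Values 1..3 are repeat codes referring to the most recent offsets ("Since litlen is always 0, this is offset 1"), and a literal distance is stored as distance + 3. A simplified sketch of that convention; the real format also renumbers the repeat codes when a sequence carries no literals, which is omitted here, and both function names are hypothetical:

```go
package main

import "fmt"

// storeOffset returns the value kept in seq.offset: a repeat code
// (1..3) when the distance equals one of the three most recent
// offsets, otherwise the literal distance plus 3.
func storeOffset(dist uint32, recent [3]uint32) uint32 {
	for i, r := range recent {
		if dist == r {
			return uint32(i) + 1
		}
	}
	return dist + 3
}

// resolveOffset inverts storeOffset on the decoding side.
func resolveOffset(code uint32, recent [3]uint32) uint32 {
	if code >= 1 && code <= 3 {
		return recent[code-1]
	}
	return code - 3
}

func main() {
	recent := [3]uint32{42, 17, 99}
	fmt.Println(storeOffset(42, recent), resolveOffset(1, recent))  // 1 42
	fmt.Println(storeOffset(64, recent), resolveOffset(67, recent)) // 67 64
}
```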
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
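Editor's note: `EncodeNoHist` above inlines `matchLen(src[s+4:], src[t+4:])` in place of the `e.matchlen` method. A plausible reconstruction of such a helper, comparing 8 bytes at a time via XOR and a trailing-zero count, looks like this (an illustrative sketch, not the vendored code):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen counts how many leading bytes a and b share.
func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// Equal low bytes show up as trailing zero bits; each
			// 8 zero bits is one matching byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	// Byte-at-a-time tail for the final < 8 bytes.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("abcdefghij"), []byte("abcdefgHij"))) // 7
}
```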
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
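Editor's note: the `for t > tMin && s > nextEmit && ...` loops above implement backward match extension. A 4-byte match found at s against t may also cover bytes just before s, so the encoder walks both positions backwards while staying inside the window (tMin) and not eating into literals owned by an already-emitted sequence (nextEmit). Reduced to a standalone sketch:

```go
package main

import "fmt"

func extendBackwards(src []byte, s, t, l, tMin, nextEmit int32) (int32, int32, int32) {
	for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
		s--
		t--
		l++
	}
	return s, t, l
}

func main() {
	src := []byte("abcdef__abcdefgh")
	// Suppose a 4-byte match "cdef" was found: s=10, t=2, l=4.
	s, t, l := extendBackwards(src, 10, 2, 4, 0, 0)
	fmt.Println(s, t, l) // 8 0 6: the match now starts at "ab"
}
```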
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
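Editor's note: the `markLongShardDirty`/`markShardDirty` calls sprinkled through the dict variant above exist so that `Reset` can restore only the parts of the hash table a block actually touched, instead of recopying the whole dictionary snapshot. A hedged sketch of that bookkeeping (sizes and the 4/6 threshold mirror the hunks above, but the type is illustrative):

```go
package main

import "fmt"

const (
	tableSize = 1 << 15
	shardCnt  = 1 << 6
	shardSize = tableSize / shardCnt
)

type shardedTable struct {
	table      [tableSize]uint32
	dictTable  [tableSize]uint32 // pristine copy built from the dictionary
	shardDirty [shardCnt]bool
}

// put writes an entry and marks its shard dirty.
func (t *shardedTable) put(idx, val uint32) {
	t.table[idx] = val
	t.shardDirty[idx/shardSize] = true
}

// reset restores dirty shards from the dictionary copy.
func (t *shardedTable) reset() {
	dirty := 0
	for _, d := range t.shardDirty {
		if d {
			dirty++
		}
	}
	if dirty > shardCnt*4/6 { // mostly dirty: one big copy is cheaper
		copy(t.table[:], t.dictTable[:])
	} else {
		for i, d := range t.shardDirty {
			if d {
				copy(t.table[i*shardSize:(i+1)*shardSize],
					t.dictTable[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	for i := range t.shardDirty {
		t.shardDirty[i] = false
	}
}

func main() {
	var t shardedTable
	t.put(123, 42)
	t.reset()
	fmt.Println(t.table[123]) // 0: restored from the dictionary copy
}
```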
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f51ab52..0000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,898 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
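Editor's note: every Encode variant opens with the same "protect against e.cur wraparound" guard seen above. Table entries store absolute positions (position in history plus e.cur), so when the cursor nears bufferReset, entries still inside the match window are rebased downwards and anything older is zeroed. The shift itself, isolated:

```go
package main

import "fmt"

// rebase shifts table offsets down after a cursor reset, zeroing
// entries too old to ever match again. Values in main are illustrative.
func rebase(offsets []int32, cur, histLen, maxMatchOff int32) {
	minOff := cur + histLen - maxMatchOff
	for i, v := range offsets {
		if v < minOff {
			offsets[i] = 0 // outside the window: dead entry
		} else {
			offsets[i] = v - cur + maxMatchOff
		}
	}
}

func main() {
	const maxMatchOff = 1 << 16
	offsets := []int32{100, 2_090_000, 2_140_000}
	rebase(offsets, 2_000_000, 150_000, maxMatchOff)
	fmt.Println(offsets) // [0 155536 205536]
}
```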
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
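Editor's note: the `addLiterals` closure and the trailing `blk.extraLits` flush above both maintain one invariant: `src[nextEmit:]` has not been emitted yet, and each sequence carries exactly the literals between the previous emit point and the start of its match. A reduced sketch (offsets here are plain distances; the stored +3 convention is dropped for clarity):

```go
package main

import "fmt"

type seq struct {
	litLen, matchLen, offset uint32
}

func main() {
	src := []byte("0123 MATCH 456 MATCH!")
	var literals []byte
	var seqs []seq
	nextEmit := 0

	emit := func(matchStart, matchLen, offset int) {
		literals = append(literals, src[nextEmit:matchStart]...)
		seqs = append(seqs, seq{
			litLen:   uint32(matchStart - nextEmit),
			matchLen: uint32(matchLen),
			offset:   uint32(offset),
		})
		nextEmit = matchStart + matchLen
	}

	emit(15, 5, 10) // "MATCH" at 15 repeats "MATCH" at 5, distance 10
	// Bytes that never became part of a match go out as extra
	// literals, exactly like the blk.extraLits handling above.
	literals = append(literals, src[nextEmit:]...)
	fmt.Printf("%q %v\n", literals, seqs)
}
```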
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 - nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 7aaaedb..0000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
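Editor's note: the doc comment above describes the two ways this Encoder is meant to be used. For reference, against the package's public API that is exactly:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := bytes.Repeat([]byte("hello zstd "), 100)

	// Mode 1: stream compression through the io.WriteCloser interface.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write(payload); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flushes and finishes the frame
		panic(err)
	}

	// Mode 2: independent buffer-to-buffer tasks via EncodeAll.
	// A nil writer is fine when the encoder is only used this way.
	enc2, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc2.EncodeAll(payload, nil)

	fmt.Println(buf.Len(), len(compressed)) // both far below len(payload)
}
```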
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.err = err - return err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. 
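The synchronous path above encodes and writes inline; the buffer rotation that follows ("Move blocks forward") feeds the concurrent path, which overlaps encoding of one block with writing of the previous one. A simplified sketch of that hand-off pattern — compressBlock and writePipelined are hypothetical stand-ins, not the vendored code:

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// compressBlock is a hypothetical stand-in for the real per-block encoder.
func compressBlock(src []byte) []byte { return append([]byte(nil), src...) }

// writePipelined overlaps encoding of block N with writing of block N-1,
// the same shape as the wg/wWg hand-off used above.
func writePipelined(blocks [][]byte, w io.Writer) error {
	var wWg sync.WaitGroup
	var writeErr error
	for _, b := range blocks {
		out := compressBlock(b) // encode the current block
		wWg.Wait()              // wait for the previous write to land
		if writeErr != nil {
			return writeErr
		}
		wWg.Add(1)
		go func(out []byte) {
			defer wWg.Done()
			_, writeErr = w.Write(out)
		}(out)
	}
	wWg.Wait()
	return writeErr
}

func main() {
	var buf bytes.Buffer
	if err := writePipelined([][]byte{[]byte("a"), []byte("b")}, &buf); err != nil {
		fmt.Println(err)
	}
	fmt.Println(buf.String()) // ab
}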
- s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. - enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. 
-// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst, _ = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. 
- err := errIncompressible - oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: - panic(err) - } - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: - panic(err) - } - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index a7c5e1a..0000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,317 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: true, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. 
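The level-to-encoder mapping below is selected through functional options (EOption). A brief usage sketch, assuming the public API; the option values are illustrative:

package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression), // selects betterFastEncoder below
		zstd.WithEncoderConcurrency(4),                     // up to 4 parallel block encoders
		zstd.WithWindowSize(1<<20),                         // 1 MB back-reference window
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
}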
-func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. 
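For the padding option above, a worked re-derivation of the padding size (paddingToAdd is an editorial helper mirroring calcSkippableFrame further down, not the vendored function):

package main

import "fmt"

// paddingToAdd computes how many bytes a trailing skippable frame needs
// so the total output becomes a multiple of the requested size.
func paddingToAdd(written, multiple int64) int64 {
	const header = 8 // 4-byte magic + 4-byte length
	left := written % multiple
	if left == 0 {
		return 0
	}
	add := multiple - left
	for add < header {
		add += multiple // too small to hold a frame header; pad one more multiple
	}
	return add
}

func main() {
	// 1000 bytes written, pad to multiples of 512: 24 bytes -> total 1024.
	fmt.Println(paddingToAdd(1000, 512)) // 24
}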
- if n == 1 { - o.pad = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. 
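A quick sketch of the level helpers above, assuming the public API:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map reference-zstd levels onto this package's four speed tiers.
	for _, lvl := range []int{1, 3, 7, 19} {
		fmt.Println(lvl, "->", zstd.EncoderLevelFromZstd(lvl)) // fastest, default, better, best
	}
	// Round-trip through the string form; the compare is case insensitive.
	if ok, l := zstd.EncoderLevelFromString("BEST"); ok {
		fmt.Println("parsed:", l) // parsed: best
	}
}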
-func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// The encoder *may* choose to use no dictionary instead for certain payloads. -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index 9568a4b..0000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
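The frame decoder below is reached from user code through the Decoder API. A round-trip sketch, assuming the public API; the memory cap value is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(64<<20), // cap decoded size; oversized frames error out
	)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("round trip"), nil)
	enc.Close()

	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(out), err) // round trip <nil>
}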
- -package zstd - -import ( - "bytes" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID *uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - default: - return err - case nil: - signature[0] = b[0] - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - default: - return err - case nil: - copy(signature[1:], b) - } - - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if !bytes.Equal(signature[:], frameMagic) { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch size { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch fcsSize { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. 
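The Window_Descriptor parsing above packs a 5-bit exponent and a 3-bit mantissa into one byte. A standalone re-derivation with a worked value (windowSizeFromDescriptor is an editorial helper mirroring the math above):

package main

import "fmt"

// windowSizeFromDescriptor mirrors the Window_Descriptor math above:
// windowLog = 10 + exponent, plus mantissa eighths of the base size.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + uint64(wd>>3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	// 0x19 = exponent 3, mantissa 1: 1<<13 + 1024 = 9216 bytes.
	fmt.Println(windowSizeFromDescriptor(0x19)) // 9216
}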
- d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or very small window size. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - // Alloc with one additional block - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum if the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - - // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - if d.o.ignoreChecksum { - return nil - } - - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) - - if !bytes.Equal(tmp[:], want) { - if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) - } - return ErrCRCMismatch - } - if debugDecoder { - println("CRC ok", tmp[:]) - } - return nil -} - -// consumeCRC reads the checksum data if the frame has one. -func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - } - - return nil -} - -// runDecoder will create a sync decoder that will decode a block of data. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. 
- crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.FrameContentSize != fcsUnknown { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 4ef7f5a..0000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { - dst = append(dst, frameMagic...) - var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are nto stored. 
- case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst, nil -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. 
-func tableStep(tableSize uint32) uint32 {
- return (tableSize >> 1) + (tableSize >> 3) + 3
-}
-
-// readNCount will read the symbol distribution so decoding tables can be constructed.
-func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
- var (
- charnum uint16
- previous0 bool
- )
- if b.remain() < 4 {
- return errors.New("input too small")
- }
- bitStream := b.Uint32NC()
- nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
- if nbBits > tablelogAbsoluteMax {
- println("Invalid tablelog:", nbBits)
- return errors.New("tableLog too large")
- }
- bitStream >>= 4
- bitCount := uint(4)
-
- s.actualTableLog = uint8(nbBits)
- remaining := int32((1 << nbBits) + 1)
- threshold := int32(1 << nbBits)
- gotTotal := int32(0)
- nbBits++
-
- for remaining > 1 && charnum <= maxSymbol {
- if previous0 {
- //println("prev0")
- n0 := charnum
- for (bitStream & 0xFFFF) == 0xFFFF {
- //println("24 x 0")
- n0 += 24
- if r := b.remain(); r > 5 {
- b.advance(2)
- // The check above should make sure we can read 32 bits
- bitStream = b.Uint32NC() >> bitCount
- } else {
- // end of bit stream
- bitStream >>= 16
- bitCount += 16
- }
- }
- //printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
- for (bitStream & 3) == 3 {
- n0 += 3
- bitStream >>= 2
- bitCount += 2
- }
- n0 += uint16(bitStream & 3)
- bitCount += 2
-
- if n0 > maxSymbolValue {
- return errors.New("maxSymbolValue too small")
- }
- //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
- for charnum < n0 {
- s.norm[uint8(charnum)] = 0
- charnum++
- }
-
- if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
- b.advance(bitCount >> 3)
- bitCount &= 7
- // The check above should make sure we can read 32 bits
- bitStream = b.Uint32NC() >> bitCount
- } else {
- bitStream >>= 2
- }
- }
-
- max := (2*threshold - 1) - remaining
- var count int32
-
- if int32(bitStream)&(threshold-1) < max {
- count = int32(bitStream) & (threshold - 1)
- if debugAsserts && nbBits < 1 {
- panic("nbBits underflow")
- }
- bitCount += nbBits - 1
- } else {
- count = int32(bitStream) & (2*threshold - 1)
- if count >= threshold {
- count -= max
- }
- bitCount += nbBits
- }
-
- // extra accuracy
- count--
- if count < 0 {
- // -1 means +1
- remaining += count
- gotTotal -= count
- } else {
- remaining -= count
- gotTotal += count
- }
- s.norm[charnum&0xff] = int16(count)
- charnum++
- previous0 = count == 0
- for remaining < threshold {
- nbBits--
- threshold >>= 1
- }
-
- if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
- b.advance(bitCount >> 3)
- bitCount &= 7
- // The check above should make sure we can read 32 bits
- bitStream = b.Uint32NC() >> (bitCount & 31)
- } else {
- bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
- b.off = len(b.b) - 4
- bitStream = b.Uint32() >> (bitCount & 31)
- }
- }
- s.symbolLen = charnum
- if s.symbolLen <= 1 {
- return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
- }
- if s.symbolLen > maxSymbolValue+1 {
- return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
- }
- if remaining != 1 {
- return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
- }
- if bitCount > 32 {
- return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
- }
- if gotTotal != 1<<s.actualTableLog {
- return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
- }
- b.advance((bitCount + 7) >> 3)
- return s.buildDtable()
-}
-
-func (s *fseDecoder) mustReadFrom(r io.Reader) {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- // dt [maxTablesize]decSymbol // Decompression table.
- // symbolLen uint16 // Length of active part of the symbol table.
- // actualTableLog uint8 // Selected tablelog. - // maxBits uint8 // Maximum number of additional bits - // // used for table creation to avoid allocations. - // stateTable [256]uint16 - // norm [maxSymbolValue + 1]int16 - // preDefined bool - fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) - fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. -type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. -func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. 
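decSymbol above packs four fields into a single uint64 rather than a struct. An editorial copy of the layout with a worked round-trip:

package main

import "fmt"

// decSymbol mirrors the packed layout documented above (editorial copy):
// bits 0-7 nbBits, 8-15 addBits, 16-31 newState, 32-63 baseline.
type decSymbol uint64

func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | decSymbol(addBits)<<8 | decSymbol(newState)<<16 | decSymbol(baseline)<<32
}

func main() {
	d := newDecSymbol(5, 3, 512, 70000)
	// Each accessor is a shift plus truncate, cheaper than separate struct fields.
	fmt.Println(uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32)) // 5 3 512 70000
}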
-func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index c881d28..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index da32b44..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
- -//go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 332e51f..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. 
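Both the assembly and generic buildDtable rely on the spread step visiting every table cell exactly once: tableStep(size) is always odd, hence coprime with the power-of-two table size, and the walk must end back at position 0 (the "position != 0" corruption check). A small demonstration (visitsAllCells is an editorial helper, not the vendored code):

package main

import "fmt"

// visitsAllCells replays the spread walk for a given table log and reports
// whether every cell is hit exactly once and the walk returns to zero.
func visitsAllCells(tableLog uint) bool {
	size := uint32(1) << tableLog
	step := (size >> 1) + (size >> 3) + 3 // tableStep: always odd, so coprime with size
	mask := size - 1
	seen := make([]bool, size)
	pos := uint32(0)
	for i := uint32(0); i < size; i++ {
		if seen[pos] {
			return false
		}
		seen[pos] = true
		pos = (pos + step) & mask
	}
	return pos == 0
}

func main() {
	fmt.Println(visitsAllCells(5), visitsAllCells(9)) // true true
}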
-func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. 
-func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
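normalizeCount below scales raw counts so they sum to exactly 1 << tableLog, gives rare symbols a low-probability slot, and credits the rounding remainder to the most probable symbol. A deliberately simplified sketch of that idea (normalizeSimple is an editorial helper; the vendored code adds the rtbTable rounding heuristic and the normalizeCount2 fallback):

package main

import "fmt"

// normalizeSimple scales counts to sum to 1<<tableLog, crediting any
// rounding leftover to the largest symbol. Rare-but-present symbols get at least 1.
func normalizeSimple(counts []uint32, tableLog uint) []int16 {
	var total uint64
	for _, c := range counts {
		total += uint64(c)
	}
	tableSize := int16(1) << tableLog
	norm := make([]int16, len(counts))
	still := tableSize
	largest := 0
	for i, c := range counts {
		if c == 0 {
			continue
		}
		n := int16(uint64(c) * uint64(tableSize) / total)
		if n == 0 {
			n = 1 // keep every present symbol representable
		}
		norm[i] = n
		still -= n
		if counts[i] > counts[largest] {
			largest = i
		}
	}
	norm[largest] += still // dump the remainder on the most common symbol
	return norm
}

func main() {
	norm := normalizeSimple([]uint32{900, 90, 10}, 8)
	sum := int16(0)
	for _, v := range norm {
		sum += v
	}
	fmt.Println(norm, sum) // [231 23 2] 256
}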
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart
-			)
-			if weight < 1 {
-				return errors.New("weight < 1")
-			}
-			s.norm[i] = int16(weight)
-			tmpTotal = end
-		}
-	}
-	return nil
-}
-
-// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
-func (s *fseEncoder) optimalTableLog(length int) {
-	tableLog := uint8(maxEncTableLog)
-	minBitsSrc := highBit(uint32(length)) + 1
-	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
-	minBits := uint8(minBitsSymbols)
-	if minBitsSrc < minBitsSymbols {
-		minBits = uint8(minBitsSrc)
-	}
-
-	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
-	if maxBitsSrc < tableLog {
-		// Accuracy can be reduced
-		tableLog = maxBitsSrc
-	}
-	if minBits > tableLog {
-		tableLog = minBits
-	}
-	// Need a minimum to safely represent all symbol values
-	if tableLog < minEncTablelog {
-		tableLog = minEncTablelog
-	}
-	if tableLog > maxEncTableLog {
-		tableLog = maxEncTableLog
-	}
-	s.actualTableLog = tableLog
-}
-
-// validateNorm validates the normalized histogram table.
-func (s *fseEncoder) validateNorm() (err error) {
-	var total int
-	for _, v := range s.norm[:s.symbolLen] {
-		if v >= 0 {
-			total += int(v)
-		} else {
-			total -= int(v)
-		}
-	}
-	defer func() {
-		if err == nil {
-			return
-		}
-		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
-		for i, v := range s.norm[:s.symbolLen] {
-			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
-		}
-	}()
-	if total != (1 << s.actualTableLog) {
-		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
-	}
-	for i, v := range s.count[s.symbolLen:] {
-		if v != 0 {
-			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
-		}
-	}
-	return nil
-}
-
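The two normalization passes above exist to force the raw symbol counts to sum to exactly 1<<tableLog, since FSE state tables must have a power-of-two size. As a rough illustration of the core idea, here is a standalone sketch with hypothetical names; it omits the rounding corrections, probability stealing, and corner cases that normalizeCount and normalizeCount2 handle:

```go
package main

import "fmt"

// naiveNormalize scales counts so they sum to 1<<tableLog, giving every
// present symbol at least weight 1. Unlike fseEncoder.normalizeCount, it
// simply dumps the rounding error on the most frequent symbol.
func naiveNormalize(count []uint32, total int, tableLog uint8) []int16 {
	tableSize := 1 << tableLog
	norm := make([]int16, len(count))
	assigned, largest := 0, 0
	for i, c := range count {
		if c == 0 {
			continue
		}
		n := int(uint64(c) * uint64(tableSize) / uint64(total))
		if n == 0 {
			n = 1 // low-probability symbols still need a table slot
		}
		norm[i] = int16(n)
		assigned += n
		if norm[i] > norm[largest] {
			largest = i
		}
	}
	norm[largest] += int16(tableSize - assigned) // absorb rounding drift
	return norm
}

func main() {
	fmt.Println(naiveNormalize([]uint32{60, 30, 9, 1}, 100, 6)) // [39 19 5 1], sums to 64
}
```
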
-// writeCount will write the normalized histogram count to header.
-// This is read back by readNCount.
-func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
-	if s.useRLE {
-		return append(out, s.rleVal), nil
-	}
-	if s.preDefined || s.reUsed {
-		// Never write predefined.
-		return out, nil
-	}
-
-	var (
-		tableLog  = s.actualTableLog
-		tableSize = 1 << tableLog
-		previous0 bool
-		charnum   uint16
-
-		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
-
-		// Write Table Size
-		bitStream = uint32(tableLog - minEncTablelog)
-		bitCount  = uint(4)
-		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
-		threshold = int16(tableSize)
-		nbBits    = uint(tableLog + 1)
-		outP      = len(out)
-	)
-	if cap(out) < outP+maxHeaderSize {
-		out = append(out, make([]byte, maxHeaderSize*3)...)
-		out = out[:len(out)-maxHeaderSize*3]
-	}
-	out = out[:outP+maxHeaderSize]
-
-	// stops at 1
-	for remaining > 1 {
-		if previous0 {
-			start := charnum
-			for s.norm[charnum] == 0 {
-				charnum++
-			}
-			for charnum >= start+24 {
-				start += 24
-				bitStream += uint32(0xFFFF) << bitCount
-				out[outP] = byte(bitStream)
-				out[outP+1] = byte(bitStream >> 8)
-				outP += 2
-				bitStream >>= 16
-			}
-			for charnum >= start+3 {
-				start += 3
-				bitStream += 3 << bitCount
-				bitCount += 2
-			}
-			bitStream += uint32(charnum-start) << bitCount
-			bitCount += 2
-			if bitCount > 16 {
-				out[outP] = byte(bitStream)
-				out[outP+1] = byte(bitStream >> 8)
-				outP += 2
-				bitStream >>= 16
-				bitCount -= 16
-			}
-		}
-
-		count := s.norm[charnum]
-		charnum++
-		max := (2*threshold - 1) - remaining
-		if count < 0 {
-			remaining += count
-		} else {
-			remaining -= count
-		}
-		count++ // +1 for extra accuracy
-		if count >= threshold {
-			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
-		}
-		bitStream += uint32(count) << bitCount
-		bitCount += nbBits
-		if count < max {
-			bitCount--
-		}
-
-		previous0 = count == 1
-		if remaining < 1 {
-			return nil, errors.New("internal error: remaining < 1")
-		}
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-
-		if bitCount > 16 {
-			out[outP] = byte(bitStream)
-			out[outP+1] = byte(bitStream >> 8)
-			outP += 2
-			bitStream >>= 16
-			bitCount -= 16
-		}
-	}
-
-	if outP+2 > len(out) {
-		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
-	}
-	out[outP] = byte(bitStream)
-	out[outP+1] = byte(bitStream >> 8)
-	outP += int((bitCount + 7) / 8)
-
-	if charnum > s.symbolLen {
-		return nil, errors.New("internal error: charnum > s.symbolLen")
-	}
-	return out[:outP], nil
-}
-
-// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
-// note 1 : assume symbolValue is valid (<= maxSymbolValue)
-// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits
-func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
-	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
-	threshold := (minNbBits + 1) << 16
-	if debugAsserts {
-		if !(s.actualTableLog < 16) {
-			panic("!s.actualTableLog < 16")
-		}
-		// ensure enough room for renormalization double shift
-		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
-			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
-		}
-	}
-	tableSize := uint32(1) << s.actualTableLog
-	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
-	// linear interpolation (very approximate)
-	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
-	bitMultiplier := uint32(1) << accuracyLog
-	if debugAsserts {
-		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
-			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
-		}
-		if normalizedDeltaFromThreshold > bitMultiplier {
-			panic("normalizedDeltaFromThreshold > bitMultiplier")
-		}
-	}
-	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
-}
-
-// Returns the cost in bits of encoding the distribution in count using ctable.
-// Histogram should only be up to the last non-zero symbol.
-// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
-func (s *fseEncoder) approxSize(hist []uint32) uint32 {
-	if int(s.symbolLen) < len(hist) {
-		// More symbols than we have.
-		return math.MaxUint32
-	}
-	if s.useRLE {
-		// We will never reuse RLE encoders.
-		return math.MaxUint32
-	}
-	const kAccuracyLog = 8
-	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
-	var cost uint32
-	for i, v := range hist {
-		if v == 0 {
-			continue
-		}
-		if s.norm[i] == 0 {
-			return math.MaxUint32
-		}
-		bitCost := s.bitCost(uint8(i), kAccuracyLog)
-		if bitCost > badCost {
-			return math.MaxUint32
-		}
-		cost += v * bitCost
-	}
-	return cost >> kAccuracyLog
-}
-
-// maxHeaderSize returns the maximum header size in bits.
-// This is not the exact size, but we want a penalty for new tables anyway.
-func (s *fseEncoder) maxHeaderSize() uint32 {
-	if s.preDefined {
-		return 0
-	}
-	if s.useRLE {
-		return 8
-	}
-	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
-}
-
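Pieced together, the methods above form the encoder-side pipeline: fill the histogram, declare it finished, normalize, then serialize the table header. A minimal sketch of how they compose (hypothetical helper; fseEncoder is unexported, so this only makes sense inside the zstd package, and error handling is trimmed):

```go
// buildAndWriteHeader sketches how the fseEncoder pieces above compose.
func buildAndWriteHeader(s *fseEncoder, data []byte) ([]byte, error) {
	hist := s.Histogram() // *[256]uint32, filled by the caller
	maxSym, maxCount := uint8(0), 0
	for _, b := range data {
		hist[b]++
	}
	for i, c := range hist {
		if c > 0 {
			maxSym = uint8(i)
		}
		if int(c) > maxCount {
			maxCount = int(c)
		}
	}
	s.HistogramFinished(maxSym, maxCount) // accepted at face value
	if err := s.normalizeCount(len(data)); err != nil {
		return nil, err // on success this also builds the compression table
	}
	return s.writeCount(nil) // normalized counts, read back by readNCount
}
```

-// cState contains the compression state of a stream.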
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77..0000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21..0000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-const (
-	prime3bytes = 506832829
-	prime4bytes = 2654435761
-	prime5bytes = 889523592379
-	prime6bytes = 227718039650203
-	prime7bytes = 58295818150454627
-	prime8bytes = 0xcf1bbcdcb7a56463
-)
-
-// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
-// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
-// length should always be < 32.
-// Preferably length and mls should be a constant for inlining.
-func hashLen(u uint64, length, mls uint8) uint32 {
-	switch mls {
-	case 3:
-		return (uint32(u<<8) * prime3bytes) >> (32 - length)
-	case 5:
-		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
-	case 6:
-		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
-	case 7:
-		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
-	case 8:
-		return uint32((u * prime8bytes) >> (64 - length))
-	default:
-		return (uint32(u) * prime4bytes) >> (32 - length)
-	}
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
deleted file mode 100644
index 28b4015..0000000
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-import (
-	"github.com/klauspost/compress/huff0"
-)
-
-// history contains the information transferred between blocks.
-type history struct {
-	// Literal decompression
-	huffTree *huff0.Scratch
-
-	// Sequence decompression
-	decoders      sequenceDecs
-	recentOffsets [3]int
-
-	// History buffer...
-	b []byte
-
-	// ignoreBuffer is meant to ignore a number of bytes
-	// when checking for matches in history
-	ignoreBuffer int
-
-	windowSize       int
-	allocFrameBuffer int // needed?
-	error            bool
-	dict             *dict
-}
-
-// reset will reset the history to initial state of a frame.
-// The history must already have been initialized to the desired size.
-func (h *history) reset() {
-	h.b = h.b[:0]
-	h.ignoreBuffer = 0
-	h.error = false
-	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	h.decoders = sequenceDecs{br: h.decoders.br}
-	if h.huffTree != nil {
-		if h.dict == nil || h.dict.litEnc != h.huffTree {
-			huffDecoderPool.Put(h.huffTree)
-		}
-	}
-	h.huffTree = nil
-	h.dict = nil
-	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
-}
-
-func (h *history) setDict(dict *dict) {
-	if dict == nil {
-		return
-	}
-	h.dict = dict
-	h.decoders.litLengths = dict.llDec
-	h.decoders.offsets = dict.ofDec
-	h.decoders.matchLengths = dict.mlDec
-	h.decoders.dict = dict.content
-	h.recentOffsets = dict.offsets
-	h.huffTree = dict.litEnc
-}
-
-// append bytes to history.
-// This function will make sure there is space for it,
-// provided the buffer has been allocated with enough extra space.
-func (h *history) append(b []byte) {
-	if len(b) >= h.windowSize {
-		// Discard all history by simply overwriting
-		h.b = h.b[:h.windowSize]
-		copy(h.b, b[len(b)-h.windowSize:])
-		return
-	}
-
-	// If there is space, append it.
- if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 69aa3bb..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. 
Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index 2c112a0..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,237 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index cea1785..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,216 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI - - // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX - JGE finalize - -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index 4d64a17..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,186 +0,0 @@ -// +build gc,!purego,!noasm - -#include "textflag.h" - -// Register allocation. -#define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. -#define len R4 -#define nblocks R5 // len / 32. -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc \ - -// x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. 
-DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) - - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 - - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blocksLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) - -afterLoop: - ADD len, h - - TBZ $4, len, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, len, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, len, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, len, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h - MUL prime1, h - -try1: - TBZ $0, len, end - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h - MUL prime1, h - -end: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, len) - - blocksLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, len - MOVD len, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index 1a1fac9..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 209cb4a..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb..0000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index df04472..0000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,491 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. - llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. 
-	fse    *fseDecoder
-	state  fseState
-	repeat bool
-}
-
-// init the state of the decoder with input from stream.
-func (s *sequenceDec) init(br *bitReader) error {
-	if s.fse == nil {
-		return errors.New("sequence decoder not defined")
-	}
-	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
-	return nil
-}
-
-// sequenceDecs contains all 3 decoders and their state.
-type sequenceDecs struct {
-	litLengths   sequenceDec
-	offsets      sequenceDec
-	matchLengths sequenceDec
-	prevOffset   [3]int
-	dict         []byte
-	literals     []byte
-	out          []byte
-	nSeqs        int
-	br           *bitReader
-	seqSize      int
-	windowSize   int
-	maxBits      uint8
-	maxSyncLen   uint64
-}
-
-// initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
-	if err := s.litLengths.init(br); err != nil {
-		return errors.New("litLengths:" + err.Error())
-	}
-	if err := s.offsets.init(br); err != nil {
-		return errors.New("offsets:" + err.Error())
-	}
-	if err := s.matchLengths.init(br); err != nil {
-		return errors.New("matchLengths:" + err.Error())
-	}
-	s.br = br
-	s.prevOffset = hist.recentOffsets
-	s.dict = nil
-	s.literals = hist.decoders.literals
-	s.out = out
-	s.windowSize = hist.windowSize
-	s.maxBits = hist.decoders.maxBits
-	s.maxSyncLen = hist.decoders.maxSyncLen
-	if hist.dict != nil {
-		s.dict = hist.dict.content
-	}
-	return nil
-}
-
-// execute will execute the decoded sequences on the provided history.
-// The sequence must be evaluated before being sent.
-func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize > cap(s.out) {
-		addBytes := s.seqSize + len(s.out)
-		s.out = append(s.out, make([]byte, addBytes)...)
-		s.out = s.out[:len(s.out)-addBytes]
-	}
-
-	if debugDecoder {
-		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
-	}
-
-	var t = len(s.out)
-	out := s.out[:t+s.seqSize]
-
-	for _, seq := range seqs {
-		// Add literals
-		copy(out[t:], s.literals[:seq.ll])
-		t += seq.ll
-		s.literals = s.literals[seq.ll:]
-
-		// Copy from dictionary...
-		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
-			}
-			end := dictO + seq.ml
-			if end > len(s.dict) {
-				n := len(s.dict) - dictO
-				copy(out[t:], s.dict[dictO:])
-				t += n
-				seq.ml -= n
-			} else {
-				copy(out[t:], s.dict[dictO:end])
-				t += end - dictO
-				continue
-			}
-		}
-
-		// Copy from history.
-		if v := seq.mo - t; v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if seq.ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				copy(out[t:], hist[start:])
-				t += v
-				seq.ml -= v
-			} else {
-				copy(out[t:], hist[start:start+seq.ml])
-				t += seq.ml
-				continue
-			}
-		}
-		// We must be in current buffer now
-		if seq.ml > 0 {
-			start := t - seq.mo
-			if seq.ml <= t-start {
-				// No overlap
-				copy(out[t:], out[start:start+seq.ml])
-				t += seq.ml
-				continue
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at the time.
-				src := out[start : start+seq.ml]
-				dst := out[t:]
-				dst = dst[:len(src)]
-				t += len(src)
-				// Destination is the space we just added.
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-	}
-
-	// Add final literals
-	copy(out[t:], s.literals)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
-		}
-	}
-	s.out = out
-
-	return nil
-}
-
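The overlap branch in execute above is the core LZ77 trick: when the match offset is smaller than the match length, the copy must run byte by byte so that bytes written earlier in the copy become the source for later ones. A standalone sketch of that behavior (hypothetical helper, not part of this package):

```go
// expandMatch appends ml bytes copied from mo bytes back in out.
// When mo < ml the source and destination overlap, so copy() would read
// stale bytes; the byte-wise loop reuses output as it is produced.
func expandMatch(out []byte, mo, ml int) []byte {
	start := len(out) - mo
	for i := 0; i < ml; i++ {
		out = append(out, out[start+i])
	}
	return out
}

// expandMatch([]byte("ab"), 2, 6) yields "abababab": a 2-byte pattern
// replicated through the overlap.
```
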
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(hist []byte) error {
-	supported, err := s.decodeSyncSimple(hist)
-	if supported {
-		return err
-	}
-
-	br := s.br
-	seqs := s.nSeqs
-	startSize := len(s.out)
-	// Grab full sizes tables, to avoid bounds checks.
-	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
-	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-	out := s.out
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	for i := seqs - 1; i >= 0; i-- {
-		if br.overread() {
-			printf("reading sequence %d, exceeded available data\n", seqs-i)
-			return io.ErrUnexpectedEOF
-		}
-		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
-			// inlined function:
-			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
-
-			// Final will not read from stream.
-			var llB, mlB, moB uint8
-			ll, llB = llState.final()
-			ml, mlB = mlState.final()
-			mo, moB = ofState.final()
-
-			// extra bits are stored in reverse order.
-			br.fillFast()
-			mo += br.getBits(moB)
-			if s.maxBits > 32 {
-				br.fillFast()
-			}
-			ml += br.getBits(mlB)
-			ll += br.getBits(llB)
-
-			if moB > 1 {
-				s.prevOffset[2] = s.prevOffset[1]
-				s.prevOffset[1] = s.prevOffset[0]
-				s.prevOffset[0] = mo
-			} else {
-				// mo = s.adjustOffset(mo, ll, moB)
-				// Inlined for rather big speedup
-				if ll == 0 {
-					// There is an exception though, when current sequence's literals_length = 0.
-					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
-					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
-					mo++
-				}
-
-				if mo == 0 {
-					mo = s.prevOffset[0]
-				} else {
-					var temp int
-					if mo == 3 {
-						temp = s.prevOffset[0] - 1
-					} else {
-						temp = s.prevOffset[mo]
-					}
-
-					if temp == 0 {
-						// 0 is not valid; input is corrupted; force offset to 1
-						println("WARNING: temp was 0")
-						temp = 1
-					}
-
-					if mo != 1 {
-						s.prevOffset[2] = s.prevOffset[1]
-					}
-					s.prevOffset[1] = s.prevOffset[0]
-					s.prevOffset[0] = temp
-					mo = temp
-				}
-			}
-			br.fillFast()
-		} else {
-			ll, mo, ml = s.next(br, llState, mlState, ofState)
-			br.fill()
-		}
-
-		if debugSequences {
-			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
-		}
-
-		if ll > len(s.literals) {
-			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
-		}
-		size := ll + ml + len(out)
-		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
-		}
-		if size > cap(out) {
-			// Not enough size, which can happen under high volume block streaming conditions,
-			// but could also happen if the destination slice is too small for sync operations.
-			// Over-allocating here can create a large amount of GC pressure, so we try to keep
-			// it as contained as possible.
-			used := len(out) - startSize
-			addBytes := 256 + ll + ml + used>>2
-			// Clamp to max block size.
-			if used+addBytes > maxBlockSize {
-				addBytes = maxBlockSize - used
-			}
-			out = append(out, make([]byte, addBytes)...)
-			out = out[:len(out)-addBytes]
-		}
-		if ml > maxMatchLen {
-			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-		}
-
-		// Add literals
-		out = append(out, s.literals[:ll]...)
-		s.literals = s.literals[ll:]
-
-		if mo == 0 && ml > 0 {
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-		}
-
-		if mo > len(out)+len(hist) || mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
-			}
-
-			// we may be in dictionary.
- dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) - br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - - } - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. 
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index 7598c10..0000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,368 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. 
- const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) 
-
-// --------------------------------------------------------------------------------
-
-type decodeAsmContext struct {
-	llTable   []decSymbol
-	mlTable   []decSymbol
-	ofTable   []decSymbol
-	llState   uint64
-	mlState   uint64
-	ofState   uint64
-	iteration int
-	seqs      []seqVals
-	litRemain int
-}
-
-const noError = 0
-
-// error reported when mo == 0 && ml > 0
-const errorMatchLenOfsMismatch = 1
-
-// error reported when ml > maxMatchLen
-const errorMatchLenTooBig = 2
-
-// error reported when mo > available history or mo > s.windowSize
-const errorMatchOffTooBig = 3
-
-// error reported when the sum of literal lengths exceeds the literal buffer size
-const errorNotEnoughLiterals = 4
-
-// error reported when capacity of `out` is too small
-const errorNotEnoughSpace = 5
-
-// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//go:noescape
-func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm,
-// for the case where a single sequence consumes at most 56 bits.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//go:noescape
-func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//go:noescape
-func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions,
-// for the case where a single sequence consumes at most 56 bits.
-//go:noescape
-func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
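These context structs are not just parameter bags: the assembly addresses every field by raw byte offset, so the struct layout is effectively an ABI. For example, the MOVQ 72(AX) loads in the listing below pick up llState because three 24-byte slice headers precede it. A sketch with illustrative field types (decSymbol is uint64-sized):

package main

import (
	"fmt"
	"unsafe"
)

// asmCtx mirrors the layout of decodeAsmContext. On amd64 a slice
// header is 24 bytes (pointer, len, cap), so the scalar fields land at
// the fixed offsets the assembly hard-codes.
type asmCtx struct {
	llTable   []uint64
	mlTable   []uint64
	ofTable   []uint64
	llState   uint64
	mlState   uint64
	ofState   uint64
	iteration int
	seqs      []uint64
	litRemain int
}

func main() {
	var c asmCtx
	fmt.Println(unsafe.Offsetof(c.llState))   // 72, as in MOVQ 72(AX), DI
	fmt.Println(unsafe.Offsetof(c.iteration)) // 96, as in DECQ 96(AX)
	fmt.Println(unsafe.Offsetof(c.litRemain)) // 128, as in SUBQ R14, 128(R15)
}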
-// decode decodes sequences from the stream without the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error {
-	br := s.br
-
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
-
-	ctx := decodeAsmContext{
-		llTable:   s.litLengths.fse.dt[:maxTablesize],
-		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
-		ofTable:   s.offsets.fse.dt[:maxTablesize],
-		llState:   uint64(s.litLengths.state.state),
-		mlState:   uint64(s.matchLengths.state.state),
-		ofState:   uint64(s.offsets.state.state),
-		seqs:      seqs,
-		iteration: len(seqs) - 1,
-		litRemain: len(s.literals),
-	}
-
-	s.seqSize = 0
-	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
-	var errCode int
-	if cpuinfo.HasBMI2() {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
-		}
-	} else {
-		if lte56bits {
-			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
-		} else {
-			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
-		}
-	}
-	if errCode != 0 {
-		i := len(seqs) - ctx.iteration - 1
-		switch errCode {
-		case errorMatchLenOfsMismatch:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-
-		case errorMatchLenTooBig:
-			ml := ctx.seqs[i].ml
-			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-
-		case errorNotEnoughLiterals:
-			ll := ctx.seqs[i].ll
-			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d are available", ll, ctx.litRemain+ll)
-		}
-
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
-	}
-
-	if ctx.litRemain < 0 {
-		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
-			len(s.literals), len(s.literals)-ctx.litRemain)
-	}
-
-	s.seqSize += ctx.litRemain
-	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
-	}
-	err := br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
-	return err
-}
-
-// --------------------------------------------------------------------------------
-
-type executeAsmContext struct {
-	seqs        []seqVals
-	seqIndex    int
-	out         []byte
-	history     []byte
-	literals    []byte
-	outPosition int
-	litPosition int
-	windowSize  int
-}
-
-// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
-//
-// Returns false if a match offset is too big.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//go:noescape
-func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
-
-// sequenceDecs_executeSimple_safe_amd64 is the same as above, but with safe memory copies.
-//go:noescape
-func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
-
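The lte56bits test in decode selects the _56 variants: a refill loads whole bytes into the 64-bit buffer and leaves at most 7 stale bits, so at least 56 fresh bits are available after every fill. When the worst case for one sequence, the maximum extra bits of the three codes plus the three FSE table logs, cannot exceed 56 bits, the mid-sequence refill can be skipped. A sketch with illustrative numbers:

package main

import "fmt"

// canUseFast mirrors the lte56bits condition: one refill per sequence
// is enough when a sequence can never consume more than 56 bits.
func canUseFast(maxExtraBits, llLog, mlLog, ofLog uint8) bool {
	return int(maxExtraBits)+int(llLog)+int(mlLog)+int(ofLog) <= 56
}

func main() {
	fmt.Println(canUseFast(30, 6, 6, 5)) // true: 47-bit worst case
	fmt.Println(canUseFast(40, 9, 9, 8)) // false: 66 bits needs a second fill
}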
-// executeSimple handles cases when a dictionary is not used.
-func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
-		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
-		s.out = append(s.out, make([]byte, addBytes)...)
-		s.out = s.out[:len(s.out)-addBytes]
-	}
-
-	if debugDecoder {
-		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
-	}
-
-	var t = len(s.out)
-	out := s.out[:t+s.seqSize]
-
-	ctx := executeAsmContext{
-		seqs:        seqs,
-		seqIndex:    0,
-		out:         out,
-		history:     hist,
-		outPosition: t,
-		litPosition: 0,
-		literals:    s.literals,
-		windowSize:  s.windowSize,
-	}
-	var ok bool
-	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
-		ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
-	} else {
-		ok = sequenceDecs_executeSimple_amd64(&ctx)
-	}
-	if !ok {
-		return fmt.Errorf("match offset (%d) bigger than current history (%d)",
-			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
-	}
-	s.literals = s.literals[ctx.litPosition:]
-	t = ctx.outPosition
-
-	// Add final literals
-	copy(out[t:], s.literals)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
-		}
-	}
-	s.out = out
-
-	return nil
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
deleted file mode 100644
index 27e7677..0000000
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ /dev/null
@@ -1,4100 +0,0 @@
-// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
-
-//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
-
-// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-// Requires: CMOV
-TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ br+8(FP), AX
-	MOVQ 32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ 24(AX), SI
-	MOVQ (AX), AX
-	ADDQ SI, AX
-	MOVQ AX, (SP)
-	MOVQ ctx+16(FP), AX
-	MOVQ 72(AX), DI
-	MOVQ 80(AX), R8
-	MOVQ 88(AX), R9
-	MOVQ 104(AX), R10
-	MOVQ s+0(FP), AX
-	MOVQ 144(AX), R11
-	MOVQ 152(AX), R12
-	MOVQ 160(AX), R13
-
-sequenceDecs_decode_amd64_main_loop:
-	MOVQ (SP), R14
-
-	// Fill bitreader to have enough for the offset and match length.
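The fill blocks below keep the bit buffer topped up, as that comment says: a whole-word reload while at least 8 bytes of input remain, then byte-by-byte shifts near the start of the buffer. The pure-Go reference does the same in 32-bit steps; a simplified sketch (not the package's exact bitReader):

package main

import "fmt"

// bitReader consumes its input from the end. fill reloads 32 bits at a
// time while at least 4 bytes remain, then falls back to single bytes.
type bitReader struct {
	in       []byte
	off      int    // next unread position, counting down from len(in)
	value    uint64 // bit buffer, newest bits in the low word
	bitsRead uint8  // bits of value already consumed
}

func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return // enough fresh bits are still buffered
	}
	if b.off >= 4 {
		v := b.in[b.off-4 : b.off]
		low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
		b.value = b.value<<32 | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	for b.off > 0 { // tail of the stream: one byte at a time
		b.value = b.value<<8 | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}

func main() {
	b := &bitReader{in: []byte{1, 2, 3, 4, 5, 6, 7, 8}, off: 8, bitsRead: 64}
	b.fill()
	fmt.Printf("%#x\n", b.value) // 0x8070605: bytes 5..8 shifted in
}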
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - 
MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset 
and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_end - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - 
-sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_end - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ 
sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - 
-sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - 
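The copy_4_move_4through7 block above and copy_4_move_8through16 below use a branch-light trick: an n-byte copy (4 to 7 bytes, or 8 to 16 bytes) is done as one fixed-size move from the start and one ending at the last byte, with the two stores overlapping in the middle so no length loop is needed. A sketch of the 4-to-7 case (copy4to7 is illustrative, not part of the package):

package main

import (
	"encoding/binary"
	"fmt"
)

// copy4to7 copies n bytes (4 <= n <= 7) with two 4-byte moves: one at
// the start, one ending at byte n; the stores overlap in the middle.
// Valid only when dst and src do not overlap.
func copy4to7(dst, src []byte, n int) {
	head := binary.LittleEndian.Uint32(src[:4])
	tail := binary.LittleEndian.Uint32(src[n-4 : n])
	binary.LittleEndian.PutUint32(dst[:4], head)
	binary.LittleEndian.PutUint32(dst[n-4:n], tail)
}

func main() {
	dst := make([]byte, 7)
	copy4to7(dst, []byte("abcdefg"), 7)
	fmt.Println(string(dst)) // abcdefg
}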
-copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - 
MOVB -1(SI)(R11*1), R15 - MOVB R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB 
copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL 
$0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, 
-8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), 
R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 
144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9)
- JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - 
// Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ 
ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - 
-copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ
-8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - 
MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, 
ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index c3452bc..0000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
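- // For example, with prevOffset = {8, 4, 2} and literals_length == 0, an - // offset_value of 1 resolves to 4 (Repeated_Offset2), 2 resolves to 2 - // (Repeated_Offset3), and 3 resolves to prevOffset[0]-1 = 7.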
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. 
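- // For example, v == 10 with len(hist) == 100 means the match begins - // at hist[90], i.e. 10 bytes before the end of the history buffer.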
- start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. - // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174..0000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one; move current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
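- // Lengths 0-63 map directly through llCodeTable, so llCode(10) == 10; longer - // lengths use a logarithmic code, e.g. llCode(100) == highBit(100)+19 == 25, - // which llBitsTable marks as carrying 6 extra bits.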
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index 9e1baad..0000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read Snappy-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
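- // Per the framing format, the chunk body is a 4-byte masked CRC-32C of the - // uncompressed data followed by the Snappy-compressed payload.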
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
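- // The framing format requires decoders to abort on these rather than - // skip them, hence the unsupported error below.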
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8..0000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Write - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 3eb3f1c..0000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,152 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" - "math/bits" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -// matchLen returns the maximum length. -// a must be the shortest of the two. -// The function also returns whether all bytes matched. 
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) - } - } - - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore deleted file mode 100644 index e0a38e1..0000000 --- a/vendor/github.com/montanaflynn/stats/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -coverage.out -release-notes.txt -.directory -.chglog -.vscode \ No newline at end of file diff --git a/vendor/github.com/montanaflynn/stats/.travis.yml b/vendor/github.com/montanaflynn/stats/.travis.yml deleted file mode 100644 index 28118fb..0000000 --- a/vendor/github.com/montanaflynn/stats/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go -go: - - "1.7" - - "1.8" - - "1.9" - - "1.10" - - "1.11" - - "1.12" - - "1.13" - - "1.14" - - "1.15" - - "1.16" - - stable - - master -arch: - - amd64 - - arm64 -before_install: - - go get github.com/mattn/goveralls -script: - - go test -v -covermode=count -coverprofile=coverage.out -after_success: - - $GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -notifications: - email: - recipients: - - montana@montanaflynn.me - on_success: change - on_failure: always diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md deleted file mode 100644 index ad842a5..0000000 --- a/vendor/github.com/montanaflynn/stats/CHANGELOG.md +++ /dev/null @@ -1,598 +0,0 @@ - - -## [v0.6.6](https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6) (2021-04-26) - -### Add - -* Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) -* Add latest versions of Go to test against - -### Use - -* Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) - - - -## [v0.6.5](https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5) (2021-02-21) - -### Add - -* Add Float64Data.Quartiles documentation - -### Update - -* Update changelog with v0.6.5 changes - - - -## [v0.6.4](https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4) (2021-02-21) - -### Add - -* Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) - -### Fix - -* Fix make release changelog command and add changelog history -* Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) - -### Update - -* Update changelog with v0.6.4 changes -* Update README.md links to CHANGELOG.md and DOCUMENTATION.md -* Update README.md and Makefile with new release commands -* Update changelog with v0.6.4 changes -* Update examples directory to include a README.md used for synopsis -* Update go.mod to include go version where modules are enabled by default -* Update changelog with v0.6.3 changes - - - -## [v0.6.3](https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3) (2020-02-18) - -### Add - -* 
Add creating and committing changelog to Makefile release directive -* Add release-notes.txt and .chglog directory to .gitignore - -### Update - -* Update exported tests to use import for better example documentation -* Update documentation using godoc2md -* Update changelog with v0.6.2 release - - - -## [v0.6.2](https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2) (2020-02-18) - -### Fix - -* Fix linting errcheck warnings in go benchmarks - -### Update - -* Update Makefile release directive to use correct release name - - - -## [v0.6.1](https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1) (2020-02-18) - -### Add - -* Add StableSample function signature to readme - -### Fix - -* Fix linting warnings for normal distribution functions formatting and tests - -### Update - -* Update documentation links and rename DOC.md to DOCUMENTATION.md -* Update README with link to pkg.go.dev reference and release section -* Update Makefile with new changelog, docs, and release directives -* Update DOC.md links to GitHub source code -* Update doc.go comment and add DOC.md package reference file -* Update changelog using git-chglog - - - -## [v0.6.0](https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0) (2020-02-17) - -### Add - -* Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) -* Add previous versions of Go to travis CI config -* Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) -* Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) -* Add doc.go file to show description and usage on godoc.org -* Add comments to new error and legacy error variables -* Add ExampleRound function to tests -* Add go.mod file for module support -* Add Sigmoid, SoftMax and Entropy methods and tests -* Add Entropy documentation, example and benchmarks -* Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) - -### Fix - -* Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) -* Fix AutoCorrelation name in comments and remove unneeded Sprintf - -### Improve - -* Improve documentation section with command comments - -### Remove - -* Remove very old versions of Go in travis CI config -* Remove boolean comparison to get rid of gometalinter warning - -### Update - -* Update license dates -* Update Distance functions signatures to use Float64Data -* Update Sigmoid examples -* Update error names with backward compatibility - -### Use - -* Use relative link to examples/main.go -* Use a single var block for exported errors - - - -## [v0.5.0](https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0) (2019-01-16) - -### Add - -* Add Sigmoid and Softmax functions - -### Fix - -* Fix syntax highlighting and add CumulativeSum func - - - -## [v0.4.0](https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0) (2019-01-14) - -### Add - -* Add goreport badge and documentation section to README.md -* Add Examples to test files -* Add AutoCorrelation and nist tests -* Add String method to statsErr type -* Add Y coordinate error for ExponentialRegression -* Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) -* Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) -* Add more tests and rename distance files -* Add coverage and benchmarks to azure pipeline -* Add go tests to azure pipeline - -### Change - -* Change travis tip alias to master -* Change codecov to coveralls for 
code coverage - -### Fix - -* Fix a few lint warnings -* Fix example error - -### Improve - -* Improve test coverage of distance functions - -### Only - -* Only run travis on stable and tip versions -* Only check code coverage on tip - -### Remove - -* Remove azure CI pipeline -* Remove unnecessary type conversions - -### Return - -* Return EmptyInputErr instead of EmptyInput - -### Set - -* Set up CI with Azure Pipelines - - - -## [0.3.0](https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0) (2017-12-02) - -### Add - -* Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) -* Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34)) -* Add support for time.Duration -* Add LoadRawData to docs and examples -* Add unit test for edge case that wasn't covered -* Add unit tests for edge cases that weren't covered -* Add pearson alias delegating to correlation -* Add CovariancePopulation to Float64Data -* Add pearson product-moment correlation coefficient -* Add population covariance -* Add random slice benchmarks -* Add all applicable functions as methods to Float64Data type -* Add MIT license badge -* Add link to examples/methods.go -* Add Protips for usage and documentation sections -* Add tests for rounding up -* Add webdoc target and remove linting from test target -* Add example usage and consolidate contributing information - -### Added - -* Added MedianAbsoluteDeviation - -### Annotation - -* Annotation spelling error - -### Auto - -* auto commit -* auto commit - -### Calculate - -* Calculate correlation with sdev and covp - -### Clean - -* Clean up README.md and add info for offline docs - -### Consolidated - -* Consolidated all error values. - -### Fix - -* Fix Percentile logic -* Fix InterQuartileRange method test -* Fix zero percent bug and add test -* Fix usage example output typos - -### Improve - -* Improve bounds checking in Percentile -* Improve error log messaging - -### Imput - -* Imput -> Input - -### Include - -* Include alternative way to set Float64Data in example - -### Make - -* Make various changes to README.md - -### Merge - -* Merge branch 'master' of github.com:montanaflynn/stats -* Merge master - -### Mode - -* Mode calculation fix and tests - -### Realized - -* Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. - -### Refactor - -* Refactor testing of Round() -* Refactor setting Coordinate y field using Exp in place of Pow -* Refactor Makefile and add docs target - -### Remove - -* Remove deep links to types and functions - -### Rename - -* Rename file from types to data - -### Retrieve - -* Retrieve InterQuartileRange for the Float64Data. 
- -### Split - -* Split up stats.go into separate files - -### Support - -* Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) - -### Switch - -* Switch default and check targets - -### Update - -* Update Readme -* Update example methods and some text -* Update README and include Float64Data type method examples - -### Pull Requests - -* Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile -* Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test -* Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master -* Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds -* Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 -* Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration -* Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master -* Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master -* Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug -* Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master -* Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master -* Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master -* Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson -* Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD -* Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master -* Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce -* Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite - - - -## [0.2.0](https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0) (2015-10-14) - -### Add - -* Add Makefile with gometalinter, testing, benchmarking and coverage report targets -* Add comments describing functions and structs -* Add Correlation func -* Add Covariance func -* Add tests for new function shortcuts -* Add StandardDeviation function as a shortcut to StandardDeviationPopulation -* Add Float64Data and Series types - -### Change - -* Change Sample to return a standard []float64 type - -### Fix - -* Fix broken link to Makefile -* Fix broken link and simplify code coverage reporting command -* Fix go vet warning about printf type placeholder -* Fix failing codecov test coverage reporting -* Fix link to CHANGELOG.md - -### Fixed - -* Fixed typographical error, changed accomdate to accommodate in README. 
- -### Include - -* Include Variance and StandardDeviation shortcuts - -### Pass - -* Pass gometalinter - -### Refactor - -* Refactor Variance function to be the same as population variance - -### Release - -* Release version 0.2.0 - -### Remove - -* Remove unneeded do packages and update cover URL -* Remove sudo from pip install - -### Reorder - -* Reorder functions and sections - -### Revert - -* Revert to legacy containers to preserve go1.1 testing - -### Switch - -* Switch from legacy to container-based CI infrastructure - -### Update - -* Update contributing instructions and mention Makefile - -### Pull Requests - -* Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate - - - -## [0.1.0](https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0) (2015-08-19) - -### Add - -* Add CONTRIBUTING.md - -### Rename - -* Rename functions while preserving backwards compatibility - - - -## 0.0.9 (2015-08-18) - -### Add - -* Add HarmonicMean func -* Add GeometricMean func -* Add .gitignore to avoid commiting test coverage report -* Add Outliers stuct and QuantileOutliers func -* Add Interquartile Range, Midhinge and Trimean examples -* Add Trimean -* Add Midhinge -* Add Inter Quartile Range -* Add a unit test to check for an empty slice error -* Add Quantiles struct and Quantile func -* Add more tests and fix a typo -* Add Golang 1.5 to build tests -* Add a standard MIT license file -* Add basic benchmarking -* Add regression models -* Add codecov token -* Add codecov -* Add check for slices with a single item -* Add coverage tests -* Add back previous Go versions to Travis CI -* Add Travis CI -* Add GoDoc badge -* Add Percentile and Float64ToInt functions -* Add another rounding test for whole numbers -* Add build status badge -* Add code coverage badge -* Add test for NaN, achieving 100% code coverage -* Add round function -* Add standard deviation function -* Add sum function - -### Add - -* add tests for sample -* add sample - -### Added - -* Added sample and population variance and deviation functions -* Added README - -### Adjust - -* Adjust API ordering - -### Avoid - -* Avoid unintended consequence of using sort - -### Better - -* Better performing min/max -* Better description - -### Change - -* Change package path to potentially fix a bug in earlier versions of Go - -### Clean - -* Clean up README and add some more information -* Clean up test error - -### Consistent - -* Consistent empty slice error messages -* Consistent var naming -* Consistent func declaration - -### Convert - -* Convert ints to floats - -### Duplicate - -* Duplicate packages for all versions - -### Export - -* Export Coordinate struct fields - -### First - -* First commit - -### Fix - -* Fix copy pasta mistake testing the wrong function -* Fix error message -* Fix usage output and edit API doc section -* Fix testing edgecase where map was in wrong order -* Fix usage example -* Fix usage examples - -### Include - -* Include the Nearest Rank method of calculating percentiles - -### More - -* More commenting - -### Move - -* Move GoDoc link to top - -### Redirect - -* Redirect kills newer versions of Go - -### Refactor - -* Refactor code and error checking - -### Remove - -* Remove unnecassary typecasting in sum func -* Remove cover since it doesn't work for later versions of go -* Remove golint and gocoveralls - -### Rename - -* Rename StandardDev to StdDev -* Rename StandardDev to StdDev - -### Return - -* Return errors for all functions - -### Run 
- -* Run go fmt to clean up formatting - -### Simplify - -* Simplify min/max function - -### Start - -* Start with minimal tests - -### Switch - -* Switch wercker to travis and update todos - -### Table - -* table testing style - -### Update - -* Update README and move the example main.go into it's own file -* Update TODO list -* Update README -* Update usage examples and todos - -### Use - -* Use codecov the recommended way -* Use correct string formatting types - -### Pull Requests - -* Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample - diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md deleted file mode 100644 index b567889..0000000 --- a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md +++ /dev/null @@ -1,1237 +0,0 @@ - - -# stats -`import "github.com/montanaflynn/stats"` - -* [Overview](#pkg-overview) -* [Index](#pkg-index) -* [Examples](#pkg-examples) -* [Subdirectories](#pkg-subdirectories) - -## Overview -Package stats is a well tested and comprehensive -statistics library package with no dependencies. - -Example Usage: - - - // start with some source data to use - data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} - - // you could also use different types like this - // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) - // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) - // etc... - - median, _ := stats.Median(data) - fmt.Println(median) // 3.65 - - roundedMedian, _ := stats.Round(median, 0) - fmt.Println(roundedMedian) // 4 - -MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) - - - - -## Index -* [Variables](#pkg-variables) -* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation) -* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance) -* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation) -* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance) -* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation) -* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum) -* [func Entropy(input Float64Data) (float64, error)](#Entropy) -* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance) -* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean) -* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean) -* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange) -* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance) -* [func Max(input Float64Data) (max float64, err error)](#Max) -* [func Mean(input Float64Data) (float64, error)](#Mean) -* [func Median(input Float64Data) (median float64, err error)](#Median) -* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation) -* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation) -* [func Midhinge(input Float64Data) (float64, error)](#Midhinge) -* [func Min(input Float64Data) (min float64, err error)](#Min) -* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance) -* [func Mode(input Float64Data) (mode []float64, err error)](#Mode) -* [func Ncr(n, r int) 
int](#Ncr) -* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs) -* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf) -* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy) -* [func NormFit(data []float64) [2]float64](#NormFit) -* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval) -* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf) -* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf) -* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf) -* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf) -* [func NormMean(loc float64, scale float64) float64](#NormMean) -* [func NormMedian(loc float64, scale float64) float64](#NormMedian) -* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment) -* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf) -* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf) -* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs) -* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf) -* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats) -* [func NormStd(loc float64, scale float64) float64](#NormStd) -* [func NormVar(loc float64, scale float64) float64](#NormVar) -* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson) -* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile) -* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank) -* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance) -* [func Round(input float64, places int) (rounded float64, err error)](#Round) -* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample) -* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance) -* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid) -* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax) -* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample) -* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation) -* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation) -* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample) -* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP) -* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS) -* [func Sum(input Float64Data) (sum float64, err error)](#Sum) -* [func Trimean(input Float64Data) (float64, error)](#Trimean) -* [func VarP(input Float64Data) (sdev float64, err error)](#VarP) -* [func VarS(input Float64Data) (sdev float64, err error)](#VarS) -* [func Variance(input Float64Data) (sdev float64, err error)](#Variance) -* [type Coordinate](#Coordinate) - * [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg) - * [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg) - * [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg) -* [type Float64Data](#Float64Data) - * [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData) - * [func (f Float64Data) AutoCorrelation(lags 
int) (float64, error)](#Float64Data.AutoCorrelation) - * [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation) - * [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance) - * [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation) - * [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum) - * [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy) - * [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean) - * [func (f Float64Data) Get(i int) float64](#Float64Data.Get) - * [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean) - * [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange) - * [func (f Float64Data) Len() int](#Float64Data.Len) - * [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less) - * [func (f Float64Data) Max() (float64, error)](#Float64Data.Max) - * [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean) - * [func (f Float64Data) Median() (float64, error)](#Float64Data.Median) - * [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation) - * [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation) - * [func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge) - * [func (f Float64Data) Min() (float64, error)](#Float64Data.Min) - * [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode) - * [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson) - * [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile) - * [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank) - * [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance) - * [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile) - * [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers) - * [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles) - * [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample) - * [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance) - * [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid) - * [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax) - * [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation) - * [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation) - * [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample) - * [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum) - * [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap) - * [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean) - * [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance) -* [type Outliers](#Outliers) - * [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers) -* [type Quartiles](#Quartiles) - * [func Quartile(input Float64Data) (Quartiles, error)](#Quartile) -* [type Series](#Series) - * [func ExponentialRegression(s 
Series) (regressions Series, err error)](#ExponentialRegression) - * [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression) - * [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression) - -#### Examples -* [AutoCorrelation](#example_AutoCorrelation) -* [ChebyshevDistance](#example_ChebyshevDistance) -* [Correlation](#example_Correlation) -* [CumulativeSum](#example_CumulativeSum) -* [Entropy](#example_Entropy) -* [LinearRegression](#example_LinearRegression) -* [LoadRawData](#example_LoadRawData) -* [Max](#example_Max) -* [Median](#example_Median) -* [Min](#example_Min) -* [Round](#example_Round) -* [Sigmoid](#example_Sigmoid) -* [SoftMax](#example_SoftMax) -* [Sum](#example_Sum) - -#### Package files -[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) - - - -## Variables -``` go -var ( - // ErrEmptyInput Input must not be empty - ErrEmptyInput = statsError{"Input must not be empty."} - // ErrNaN Not a number - ErrNaN = statsError{"Not a number."} - // ErrNegative Must not contain negative values - ErrNegative = statsError{"Must not contain negative values."} - // ErrZero Must not contain zero values - ErrZero = statsError{"Must not contain zero values."} - // ErrBounds Input is outside of range - ErrBounds = statsError{"Input is outside of range."} - // ErrSize Must be the same length - ErrSize = statsError{"Must be the same length."} - // ErrInfValue Value is infinite - ErrInfValue = statsError{"Value is infinite."} - // ErrYCoord Y Value must be greater than zero - ErrYCoord = statsError{"Y Value must be greater than zero."} -) -``` -These are the package-wide error values. -All error identification should use these values. 
-https://github.com/golang/go/wiki/Errors#naming - -``` go -var ( - EmptyInputErr = ErrEmptyInput - NaNErr = ErrNaN - NegativeErr = ErrNegative - ZeroErr = ErrZero - BoundsErr = ErrBounds - SizeErr = ErrSize - InfValue = ErrInfValue - YCoordErr = ErrYCoord - EmptyInput = ErrEmptyInput -) -``` -Legacy error names that didn't start with Err - - - -## func [AutoCorrelation](/correlation.go?s=853:918#L38) -``` go -func AutoCorrelation(data Float64Data, lags int) (float64, error) -``` -AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay - - - -## func [ChebyshevDistance](/distances.go?s=368:456#L20) -``` go -func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) -``` -ChebyshevDistance computes the Chebyshev distance between two data sets - - - -## func [Correlation](/correlation.go?s=112:171#L8) -``` go -func Correlation(data1, data2 Float64Data) (float64, error) -``` -Correlation describes the degree of relationship between two sets of data - - - -## func [Covariance](/variance.go?s=1284:1342#L53) -``` go -func Covariance(data1, data2 Float64Data) (float64, error) -``` -Covariance is a measure of how much two sets of data change - - - -## func [CovariancePopulation](/variance.go?s=1864:1932#L81) -``` go -func CovariancePopulation(data1, data2 Float64Data) (float64, error) -``` -CovariancePopulation computes covariance for entire population between two variables. - - - -## func [CumulativeSum](/cumulative_sum.go?s=81:137#L4) -``` go -func CumulativeSum(input Float64Data) ([]float64, error) -``` -CumulativeSum calculates the cumulative sum of the input slice - - - -## func [Entropy](/entropy.go?s=77:125#L6) -``` go -func Entropy(input Float64Data) (float64, error) -``` -Entropy provides calculation of the entropy - - - -## func [EuclideanDistance](/distances.go?s=836:924#L36) -``` go -func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) -``` -EuclideanDistance computes the Euclidean distance between two data sets - - - -## func [GeometricMean](/mean.go?s=319:373#L18) -``` go -func GeometricMean(input Float64Data) (float64, error) -``` -GeometricMean gets the geometric mean for a slice of numbers - - - -## func [HarmonicMean](/mean.go?s=717:770#L40) -``` go -func HarmonicMean(input Float64Data) (float64, error) -``` -HarmonicMean gets the harmonic mean for a slice of numbers - - - -## func [InterQuartileRange](/quartile.go?s=821:880#L45) -``` go -func InterQuartileRange(input Float64Data) (float64, error) -``` -InterQuartileRange finds the range between Q1 and Q3 - - - -## func [ManhattanDistance](/distances.go?s=1277:1365#L50) -``` go -func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) -``` -ManhattanDistance computes the Manhattan distance between two data sets - - - -## func [Max](/max.go?s=78:130#L8) -``` go -func Max(input Float64Data) (max float64, err error) -``` -Max finds the highest number in a slice - - - -## func [Mean](/mean.go?s=77:122#L6) -``` go -func Mean(input Float64Data) (float64, error) -``` -Mean gets the average of a slice of numbers - - - -## func [Median](/median.go?s=85:143#L6) -``` go -func Median(input Float64Data) (median float64, err error) -``` -Median gets the median number in a slice of numbers - - - -## func [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6) -``` go -func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) -``` -MedianAbsoluteDeviation finds the median of the absolute 
deviations from the dataset median
-
-
-
-## func [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11)
-``` go
-func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)
-```
-MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
-
-
-
-## func [Midhinge](/quartile.go?s=1075:1124#L55)
-``` go
-func Midhinge(input Float64Data) (float64, error)
-```
-Midhinge finds the average of the first and third quartiles
-
-
-
-## func [Min](/min.go?s=78:130#L6)
-``` go
-func Min(input Float64Data) (min float64, err error)
-```
-Min finds the lowest number in a set of data
-
-
-
-## func [MinkowskiDistance](/distances.go?s=2152:2256#L75)
-``` go
-func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)
-```
-MinkowskiDistance computes the Minkowski distance between two data sets
-
-Arguments:
-
-	dataPointX: First set of data points
-	dataPointY: Second set of data points. Length of both data
-	            sets must be equal.
-	lambda:     aka p or city blocks; with lambda = 1 the returned
-	            distance is the Manhattan distance, with lambda = 2
-	            it is the Euclidean distance, and as lambda approaches
-	            infinity it tends to the Chebyshev distance.
-
-Return:
-
-	Distance or error
-
-
-
-## func [Mode](/mode.go?s=85:141#L4)
-``` go
-func Mode(input Float64Data) (mode []float64, err error)
-```
-Mode gets the mode [most frequent value(s)] of a slice of float64s
-
-
-
-## func [Ncr](/norm.go?s=7384:7406#L239)
-``` go
-func Ncr(n, r int) int
-```
-Ncr is an N choose R algorithm.
-Aaron Cannon's algorithm.
-
-
-
-## func [NormBoxMullerRvs](/norm.go?s=667:736#L23)
-``` go
-func NormBoxMullerRvs(loc float64, scale float64, size int) []float64
-```
-NormBoxMullerRvs generates random variates using the Box–Muller transform.
-For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html
-
-
-
-## func [NormCdf](/norm.go?s=1826:1885#L52)
-``` go
-func NormCdf(x float64, loc float64, scale float64) float64
-```
-NormCdf is the cumulative distribution function.
-
-
-
-## func [NormEntropy](/norm.go?s=5773:5825#L180)
-``` go
-func NormEntropy(loc float64, scale float64) float64
-```
-NormEntropy is the differential entropy of the RV.
-
-
-
-## func [NormFit](/norm.go?s=6058:6097#L187)
-``` go
-func NormFit(data []float64) [2]float64
-```
-NormFit returns the maximum likelihood estimators for the Normal Distribution.
-Takes an array of float64 values.
-Returns an array of the mean followed by the standard deviation.
-
-
-
-## func [NormInterval](/norm.go?s=6976:7047#L221)
-``` go
-func NormInterval(alpha float64, loc float64, scale float64) [2]float64
-```
-NormInterval finds the endpoints of the range that contains alpha percent of the distribution.
-
-
-
-## func [NormIsf](/norm.go?s=4330:4393#L137)
-``` go
-func NormIsf(p float64, loc float64, scale float64) (x float64)
-```
-NormIsf is the inverse survival function (inverse of sf).
-
-
-
-## func [NormLogCdf](/norm.go?s=2016:2078#L57)
-``` go
-func NormLogCdf(x float64, loc float64, scale float64) float64
-```
-NormLogCdf is the log of the cumulative distribution function.
-
-
-
-## func [NormLogPdf](/norm.go?s=1590:1652#L47)
-``` go
-func NormLogPdf(x float64, loc float64, scale float64) float64
-```
-NormLogPdf is the log of the probability density function.
-
-
-
-## func [NormLogSf](/norm.go?s=2423:2484#L67)
-``` go
-func NormLogSf(x float64, loc float64, scale float64) float64
-```
-NormLogSf is the log of the survival function.
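As a quick orientation to how the per-point helpers above fit together, here is a minimal sketch, assuming only the signatures documented above and the package's own `github.com/montanaflynn/stats` import path; the printed values are approximate:

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	loc, scale := 0.0, 1.0 // standard normal distribution

	// Density and cumulative probability at x = 1.
	fmt.Println(stats.NormPdf(1, loc, scale)) // ~0.2420
	fmt.Println(stats.NormCdf(1, loc, scale)) // ~0.8413

	// The survival function is 1 - cdf.
	fmt.Println(stats.NormSf(1, loc, scale)) // ~0.1587

	// NormPpf inverts the CDF, so feeding the CDF value back recovers x.
	fmt.Println(stats.NormPpf(stats.NormCdf(1, loc, scale), loc, scale)) // ~1.0
}
```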
- - - -## func [NormMean](/norm.go?s=6560:6609#L206) -``` go -func NormMean(loc float64, scale float64) float64 -``` -NormMean is the mean/expected value of the distribution. - - - -## func [NormMedian](/norm.go?s=6431:6482#L201) -``` go -func NormMedian(loc float64, scale float64) float64 -``` -NormMedian is the median of the distribution. - - - -## func [NormMoment](/norm.go?s=4694:4752#L146) -``` go -func NormMoment(n int, loc float64, scale float64) float64 -``` -NormMoment approximates the non-central (raw) moment of order n. -For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution - - - -## func [NormPdf](/norm.go?s=1357:1416#L42) -``` go -func NormPdf(x float64, loc float64, scale float64) float64 -``` -NormPdf is the probability density function. - - - -## func [NormPpf](/norm.go?s=2854:2917#L75) -``` go -func NormPpf(p float64, loc float64, scale float64) (x float64) -``` -NormPpf is the point percentile function. -This is based on Peter John Acklam's inverse normal CDF. -algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). -For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ - - - -## func [NormPpfRvs](/norm.go?s=247:310#L12) -``` go -func NormPpfRvs(loc float64, scale float64, size int) []float64 -``` -NormPpfRvs generates random variates using the Point Percentile Function. -For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ - - - -## func [NormSf](/norm.go?s=2250:2308#L62) -``` go -func NormSf(x float64, loc float64, scale float64) float64 -``` -NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). - - - -## func [NormStats](/norm.go?s=5277:5345#L162) -``` go -func NormStats(loc float64, scale float64, moments string) []float64 -``` -NormStats returns the mean, variance, skew, and/or kurtosis. -Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). -Takes string containing any of 'mvsk'. -Returns array of m v s k in that order. - - - -## func [NormStd](/norm.go?s=6814:6862#L216) -``` go -func NormStd(loc float64, scale float64) float64 -``` -NormStd is the standard deviation of the distribution. - - - -## func [NormVar](/norm.go?s=6675:6723#L211) -``` go -func NormVar(loc float64, scale float64) float64 -``` -NormVar is the variance of the distribution. 
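In contrast to the per-point functions, the summary helpers take only loc and scale; a minimal sketch under the same assumptions as the example above:

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	loc, scale := 10.0, 2.0

	// For a normal distribution the summary statistics follow
	// directly from the location and scale parameters.
	fmt.Println(stats.NormMean(loc, scale))   // 10
	fmt.Println(stats.NormMedian(loc, scale)) // 10
	fmt.Println(stats.NormStd(loc, scale))    // 2
	fmt.Println(stats.NormVar(loc, scale))    // 4
}
```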
-
-
-
-## func [Pearson](/correlation.go?s=655:710#L33)
-``` go
-func Pearson(data1, data2 Float64Data) (float64, error)
-```
-Pearson calculates the Pearson product-moment correlation coefficient between two variables
-
-
-
-## func [Percentile](/percentile.go?s=98:181#L8)
-``` go
-func Percentile(input Float64Data, percent float64) (percentile float64, err error)
-```
-Percentile finds the relative standing in a slice of floats
-
-
-
-## func [PercentileNearestRank](/percentile.go?s=1079:1173#L54)
-``` go
-func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)
-```
-PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
-
-
-
-## func [PopulationVariance](/variance.go?s=828:896#L31)
-``` go
-func PopulationVariance(input Float64Data) (pvar float64, err error)
-```
-PopulationVariance finds the amount of variance within a population
-
-
-
-## func [Round](/round.go?s=88:154#L6)
-``` go
-func Round(input float64, places int) (rounded float64, err error)
-```
-Round rounds a float to a specific decimal place or precision
-
-
-
-## func [Sample](/sample.go?s=112:192#L9)
-``` go
-func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)
-```
-Sample returns a sample from the input, with or without replacement
-
-
-
-## func [SampleVariance](/variance.go?s=1058:1122#L42)
-``` go
-func SampleVariance(input Float64Data) (svar float64, err error)
-```
-SampleVariance finds the amount of variance within a sample
-
-
-
-## func [Sigmoid](/sigmoid.go?s=228:278#L9)
-``` go
-func Sigmoid(input Float64Data) ([]float64, error)
-```
-Sigmoid returns the input values in the range of 0 to 1
-along the sigmoid or s-shaped curve, commonly used in
-machine learning while training neural networks as an
-activation function.
-
-
-
-## func [SoftMax](/softmax.go?s=206:256#L8)
-``` go
-func SoftMax(input Float64Data) ([]float64, error)
-```
-SoftMax returns the input values in the range of 0 to 1
-with the sum of all the probabilities being equal to one. It
-is commonly used in machine learning neural networks.
-
-
-
-## func [StableSample](/sample.go?s=974:1042#L50)
-``` go
-func StableSample(input Float64Data, takenum int) ([]float64, error)
-```
-StableSample is like a stable sort: it returns samples from the input
-while keeping the order of the original data.
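To make the Sample/StableSample distinction concrete, a short sketch assuming only the signatures documented above (the sampled values themselves vary from run to run):

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := stats.LoadRawData([]int{10, 20, 30, 40, 50, 60, 70, 80})

	// Sample draws 3 values; with replacement=true the same value
	// may appear more than once, and the order is not guaranteed.
	random, _ := stats.Sample(data, 3, true)
	fmt.Println(random)

	// StableSample also draws 3 values, but preserves the order
	// they had in the original data.
	ordered, _ := stats.StableSample(data, 3)
	fmt.Println(ordered)
}
```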
- - - -## func [StandardDeviation](/deviation.go?s=695:762#L27) -``` go -func StandardDeviation(input Float64Data) (sdev float64, err error) -``` -StandardDeviation the amount of variation in the dataset - - - -## func [StandardDeviationPopulation](/deviation.go?s=892:969#L32) -``` go -func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) -``` -StandardDeviationPopulation finds the amount of variation from the population - - - -## func [StandardDeviationSample](/deviation.go?s=1254:1327#L46) -``` go -func StandardDeviationSample(input Float64Data) (sdev float64, err error) -``` -StandardDeviationSample finds the amount of variation from a sample - - - -## func [StdDevP](/legacy.go?s=339:396#L14) -``` go -func StdDevP(input Float64Data) (sdev float64, err error) -``` -StdDevP is a shortcut to StandardDeviationPopulation - - - -## func [StdDevS](/legacy.go?s=497:554#L19) -``` go -func StdDevS(input Float64Data) (sdev float64, err error) -``` -StdDevS is a shortcut to StandardDeviationSample - - - -## func [Sum](/sum.go?s=78:130#L6) -``` go -func Sum(input Float64Data) (sum float64, err error) -``` -Sum adds all the numbers of a slice together - - - -## func [Trimean](/quartile.go?s=1320:1368#L65) -``` go -func Trimean(input Float64Data) (float64, error) -``` -Trimean finds the average of the median and the midhinge - - - -## func [VarP](/legacy.go?s=59:113#L4) -``` go -func VarP(input Float64Data) (sdev float64, err error) -``` -VarP is a shortcut to PopulationVariance - - - -## func [VarS](/legacy.go?s=193:247#L9) -``` go -func VarS(input Float64Data) (sdev float64, err error) -``` -VarS is a shortcut to SampleVariance - - - -## func [Variance](/variance.go?s=659:717#L26) -``` go -func Variance(input Float64Data) (sdev float64, err error) -``` -Variance the amount of variation in the dataset - - - - -## type [Coordinate](/regression.go?s=143:183#L9) -``` go -type Coordinate struct { - X, Y float64 -} - -``` -Coordinate holds the data in a series - - - - - - - -### func [ExpReg](/legacy.go?s=791:856#L29) -``` go -func ExpReg(s []Coordinate) (regressions []Coordinate, err error) -``` -ExpReg is a shortcut to ExponentialRegression - - -### func [LinReg](/legacy.go?s=643:708#L24) -``` go -func LinReg(s []Coordinate) (regressions []Coordinate, err error) -``` -LinReg is a shortcut to LinearRegression - - -### func [LogReg](/legacy.go?s=944:1009#L34) -``` go -func LogReg(s []Coordinate) (regressions []Coordinate, err error) -``` -LogReg is a shortcut to LogarithmicRegression - - - - - -## type [Float64Data](/data.go?s=80:106#L4) -``` go -type Float64Data []float64 -``` -Float64Data is a named type for []float64 with helper methods - - - - - - - -### func [LoadRawData](/load.go?s=119:168#L9) -``` go -func LoadRawData(raw interface{}) (f Float64Data) -``` -LoadRawData parses and converts a slice of mixed data types to floats - - - - - -### func (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91) -``` go -func (f Float64Data) AutoCorrelation(lags int) (float64, error) -``` -AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay - - - - -### func (Float64Data) [Correlation](/data.go?s=3058:3122#L86) -``` go -func (f Float64Data) Correlation(d Float64Data) (float64, error) -``` -Correlation describes the degree of relationship between two sets of data - - - - -### func (Float64Data) [Covariance](/data.go?s=4801:4864#L141) -``` go -func (f Float64Data) Covariance(d Float64Data) (float64, error) -``` -Covariance is a measure of 
how much two sets of data change
-
-
-
-
-### func (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146)
-``` go
-func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)
-```
-CovariancePopulation computes covariance for entire population between two variables
-
-
-
-
-### func (Float64Data) [CumulativeSum](/data.go?s=883:938#L28)
-``` go
-func (f Float64Data) CumulativeSum() ([]float64, error)
-```
-CumulativeSum returns the cumulative sum of the data
-
-
-
-
-### func (Float64Data) [Entropy](/data.go?s=5480:5527#L162)
-``` go
-func (f Float64Data) Entropy() (float64, error)
-```
-Entropy provides calculation of the entropy
-
-
-
-
-### func (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40)
-``` go
-func (f Float64Data) GeometricMean() (float64, error)
-```
-GeometricMean returns the geometric mean of the data
-
-
-
-
-### func (Float64Data) [Get](/data.go?s=129:168#L7)
-``` go
-func (f Float64Data) Get(i int) float64
-```
-Get returns the item at index i in the slice
-
-
-
-
-### func (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43)
-``` go
-func (f Float64Data) HarmonicMean() (float64, error)
-```
-HarmonicMean returns the harmonic mean of the data
-
-
-
-
-### func (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106)
-``` go
-func (f Float64Data) InterQuartileRange() (float64, error)
-```
-InterQuartileRange finds the range between Q1 and Q3
-
-
-
-
-### func (Float64Data) [Len](/data.go?s=217:247#L10)
-``` go
-func (f Float64Data) Len() int
-```
-Len returns the length of the slice
-
-
-
-
-### func (Float64Data) [Less](/data.go?s=318:358#L13)
-``` go
-func (f Float64Data) Less(i, j int) bool
-```
-Less reports whether one number is less than another
-
-
-
-
-### func (Float64Data) [Max](/data.go?s=645:688#L22)
-``` go
-func (f Float64Data) Max() (float64, error)
-```
-Max returns the maximum number in the data
-
-
-
-
-### func (Float64Data) [Mean](/data.go?s=1005:1049#L31)
-``` go
-func (f Float64Data) Mean() (float64, error)
-```
-Mean returns the mean of the data
-
-
-
-
-### func (Float64Data) [Median](/data.go?s=1111:1157#L34)
-``` go
-func (f Float64Data) Median() (float64, error)
-```
-Median returns the median of the data
-
-
-
-
-### func (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46)
-``` go
-func (f Float64Data) MedianAbsoluteDeviation() (float64, error)
-```
-MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
-
-
-
-
-### func (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51)
-``` go
-func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)
-```
-MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
-
-
-
-
-### func (Float64Data) [Midhinge](/data.go?s=3912:3973#L111)
-``` go
-func (f Float64Data) Midhinge(d Float64Data) (float64, error)
-```
-Midhinge finds the average of the first and third quartiles
-
-
-
-
-### func (Float64Data) [Min](/data.go?s=536:579#L19)
-``` go
-func (f Float64Data) Min() (float64, error)
-```
-Min returns the minimum number in the data
-
-
-
-
-### func (Float64Data) [Mode](/data.go?s=1217:1263#L37)
-``` go
-func (f Float64Data) Mode() ([]float64, error)
-```
-Mode returns the mode of the data
-
-
-
-
-### func (Float64Data) [Pearson](/data.go?s=3455:3515#L96)
-``` go
-func (f Float64Data) Pearson(d Float64Data) (float64, error)
-```
-Pearson calculates the Pearson product-moment correlation coefficient between two variables.
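Since Pearson is an alias delegating to Correlation (see the changelog entry earlier in this patch that adds the alias), the package-level function and the Float64Data method agree; a minimal sketch assuming only the documented signatures:

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	x := stats.LoadRawData([]float64{1, 2, 3, 4, 5})
	y := stats.LoadRawData([]float64{2, 4, 6, 8, 10})

	// y is a perfect linear function of x, so the coefficient is 1.
	r1, _ := stats.Correlation(x, y)
	r2, _ := x.Pearson(y) // same result via the Float64Data method
	fmt.Println(r1, r2)   // 1 1
}
```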
- - - - -### func (Float64Data) [Percentile](/data.go?s=2696:2755#L76) -``` go -func (f Float64Data) Percentile(p float64) (float64, error) -``` -Percentile finds the relative standing in a slice of floats - - - - -### func (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81) -``` go -func (f Float64Data) PercentileNearestRank(p float64) (float64, error) -``` -PercentileNearestRank finds the relative standing using the Nearest Rank method - - - - -### func (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131) -``` go -func (f Float64Data) PopulationVariance() (float64, error) -``` -PopulationVariance finds the amount of variance within a population - - - - -### func (Float64Data) [Quartile](/data.go?s=3610:3673#L101) -``` go -func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) -``` -Quartile returns the three quartile points from a slice of data - - - - -### func (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71) -``` go -func (f Float64Data) QuartileOutliers() (Outliers, error) -``` -QuartileOutliers finds the mild and extreme outliers - - - - -### func (Float64Data) [Quartiles](/data.go?s=5628:5679#L167) -``` go -func (f Float64Data) Quartiles() (Quartiles, error) -``` -Quartiles returns the three quartile points from instance of Float64Data - - - - -### func (Float64Data) [Sample](/data.go?s=4208:4269#L121) -``` go -func (f Float64Data) Sample(n int, r bool) ([]float64, error) -``` -Sample returns sample from input with replacement or without - - - - -### func (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136) -``` go -func (f Float64Data) SampleVariance() (float64, error) -``` -SampleVariance finds the amount of variance within a sample - - - - -### func (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151) -``` go -func (f Float64Data) Sigmoid() ([]float64, error) -``` -Sigmoid returns the input values along the sigmoid or s-shaped curve - - - - -### func (Float64Data) [SoftMax](/data.go?s=5359:5408#L157) -``` go -func (f Float64Data) SoftMax() ([]float64, error) -``` -SoftMax returns the input values in the range of 0 to 1 -with sum of all the probabilities being equal to one. 
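The practical difference between the two mappings just described is that Sigmoid is applied element-wise while SoftMax normalises the values jointly; a minimal sketch, with approximate outputs noted and assuming only the signatures documented above:

``` go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := stats.LoadRawData([]float64{1, 2, 3})

	// Sigmoid maps each value independently onto (0, 1).
	s, _ := data.Sigmoid()
	fmt.Println(s) // ~[0.731 0.881 0.953]

	// SoftMax rescales the values jointly so that they sum to 1.
	m, _ := data.SoftMax()
	sum := 0.0
	for _, v := range m {
		sum += v
	}
	fmt.Println(m, sum) // ~[0.090 0.245 0.665] 1
}
```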
- - - - -### func (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56) -``` go -func (f Float64Data) StandardDeviation() (float64, error) -``` -StandardDeviation the amount of variation in the dataset - - - - -### func (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61) -``` go -func (f Float64Data) StandardDeviationPopulation() (float64, error) -``` -StandardDeviationPopulation finds the amount of variation from the population - - - - -### func (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66) -``` go -func (f Float64Data) StandardDeviationSample() (float64, error) -``` -StandardDeviationSample finds the amount of variation from a sample - - - - -### func (Float64Data) [Sum](/data.go?s=764:807#L25) -``` go -func (f Float64Data) Sum() (float64, error) -``` -Sum returns the total of all the numbers in the data - - - - -### func (Float64Data) [Swap](/data.go?s=425:460#L16) -``` go -func (f Float64Data) Swap(i, j int) -``` -Swap switches out two numbers in slice - - - - -### func (Float64Data) [Trimean](/data.go?s=4059:4119#L116) -``` go -func (f Float64Data) Trimean(d Float64Data) (float64, error) -``` -Trimean finds the average of the median and the midhinge - - - - -### func (Float64Data) [Variance](/data.go?s=4350:4398#L126) -``` go -func (f Float64Data) Variance() (float64, error) -``` -Variance the amount of variation in the dataset - - - - -## type [Outliers](/outlier.go?s=73:139#L4) -``` go -type Outliers struct { - Mild Float64Data - Extreme Float64Data -} - -``` -Outliers holds mild and extreme outliers found in data - - - - - - - -### func [QuartileOutliers](/outlier.go?s=197:255#L10) -``` go -func QuartileOutliers(input Float64Data) (Outliers, error) -``` -QuartileOutliers finds the mild and extreme outliers - - - - - -## type [Quartiles](/quartile.go?s=75:136#L6) -``` go -type Quartiles struct { - Q1 float64 - Q2 float64 - Q3 float64 -} - -``` -Quartiles holds the three quartile points - - - - - - - -### func [Quartile](/quartile.go?s=205:256#L13) -``` go -func Quartile(input Float64Data) (Quartiles, error) -``` -Quartile returns the three quartile points from a slice of data - - - - - -## type [Series](/regression.go?s=76:100#L6) -``` go -type Series []Coordinate -``` -Series is a container for a series of data - - - - - - - -### func [ExponentialRegression](/regression.go?s=1089:1157#L50) -``` go -func ExponentialRegression(s Series) (regressions Series, err error) -``` -ExponentialRegression returns an exponential regression on data series - - -### func [LinearRegression](/regression.go?s=262:325#L14) -``` go -func LinearRegression(s Series) (regressions Series, err error) -``` -LinearRegression finds the least squares linear regression on data series - - -### func [LogarithmicRegression](/regression.go?s=1903:1971#L85) -``` go -func LogarithmicRegression(s Series) (regressions Series, err error) -``` -LogarithmicRegression returns an logarithmic regression on data series - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/montanaflynn/stats/LICENSE b/vendor/github.com/montanaflynn/stats/LICENSE deleted file mode 100644 index 1590961..0000000 --- a/vendor/github.com/montanaflynn/stats/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), 
to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile deleted file mode 100644 index 969df12..0000000 --- a/vendor/github.com/montanaflynn/stats/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -.PHONY: all - -default: test lint - -format: - go fmt . - -test: - go test -race - -check: format test - -benchmark: - go test -bench=. -benchmem - -coverage: - go test -coverprofile=coverage.out - go tool cover -html="coverage.out" - -lint: format - golangci-lint run . - -docs: - godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md - -release: - git-chglog --output CHANGELOG.md --next-tag ${TAG} - git add CHANGELOG.md - git commit -m "Update changelog with ${TAG} changes" - git tag ${TAG} - git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt - git push origin master ${TAG} - hub release create --copy -F release-notes.txt ${TAG} - diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md deleted file mode 100644 index 4495c8d..0000000 --- a/vendor/github.com/montanaflynn/stats/README.md +++ /dev/null @@ -1,228 +0,0 @@ -# Stats - Golang Statistics Package - -[![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] - -A well tested and comprehensive Golang statistics library / package / module with no dependencies. - -If you have any suggestions, problems or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! - -## Installation - -``` -go get github.com/montanaflynn/stats -``` - -## Example Usage - -All the functions can be seen in [examples/main.go](examples/main.go) but here's a little taste: - -```go -// start with some source data to use -data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} - -// you could also use different types like this -// data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) -// data := stats.LoadRawData([]interface{}{1.1, "2", 3}) -// etc... - -median, _ := stats.Median(data) -fmt.Println(median) // 3.65 - -roundedMedian, _ := stats.Round(median, 0) -fmt.Println(roundedMedian) // 4 -``` - -## Documentation - -The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats). 
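As a quick sanity check, here is a minimal, self-contained sketch that exercises a few of the documented functions (the input values are arbitrary and only for illustration):

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// LoadRawData converts mixed input types into Float64Data.
	data := stats.LoadRawData([]interface{}{1, "2.5", 4.0})

	mean, _ := data.Mean()              // (1 + 2.5 + 4) / 3 = 2.5
	sdev, _ := data.StandardDeviation() // population standard deviation

	fmt.Println(mean, sdev)
}
```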
- -You can also view docs offline with the following commands: - -``` -# Command line -godoc . # show all exported apis -godoc . Median # show a single function -godoc -ex . Round # show function with example -godoc . Float64Data # show the type and methods - -# Local website -godoc -http=:4444 # start the godoc server on port 4444 -open http://localhost:4444/pkg/github.com/montanaflynn/stats/ -``` - -The exported API is as follows: - -```go -var ( - ErrEmptyInput = statsError{"Input must not be empty."} - ErrNaN = statsError{"Not a number."} - ErrNegative = statsError{"Must not contain negative values."} - ErrZero = statsError{"Must not contain zero values."} - ErrBounds = statsError{"Input is outside of range."} - ErrSize = statsError{"Must be the same length."} - ErrInfValue = statsError{"Value is infinite."} - ErrYCoord = statsError{"Y Value must be greater than zero."} -) - -func Round(input float64, places int) (rounded float64, err error) {} - -type Float64Data []float64 - -func LoadRawData(raw interface{}) (f Float64Data) {} - -func AutoCorrelation(data Float64Data, lags int) (float64, error) {} -func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} -func Correlation(data1, data2 Float64Data) (float64, error) {} -func Covariance(data1, data2 Float64Data) (float64, error) {} -func CovariancePopulation(data1, data2 Float64Data) (float64, error) {} -func CumulativeSum(input Float64Data) ([]float64, error) {} -func Entropy(input Float64Data) (float64, error) {} -func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} -func GeometricMean(input Float64Data) (float64, error) {} -func HarmonicMean(input Float64Data) (float64, error) {} -func InterQuartileRange(input Float64Data) (float64, error) {} -func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} -func Max(input Float64Data) (max float64, err error) {} -func Mean(input Float64Data) (float64, error) {} -func Median(input Float64Data) (median float64, err error) {} -func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {} -func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {} -func Midhinge(input Float64Data) (float64, error) {} -func Min(input Float64Data) (min float64, err error) {} -func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {} -func Mode(input Float64Data) (mode []float64, err error) {} -func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {} -func NormCdf(x float64, loc float64, scale float64) float64 {} -func NormEntropy(loc float64, scale float64) float64 {} -func NormFit(data []float64) [2]float64{} -func NormInterval(alpha float64, loc float64, scale float64 ) [2]float64 {} -func NormIsf(p float64, loc float64, scale float64) (x float64) {} -func NormLogCdf(x float64, loc float64, scale float64) float64 {} -func NormLogPdf(x float64, loc float64, scale float64) float64 {} -func NormLogSf(x float64, loc float64, scale float64) float64 {} -func NormMean(loc float64, scale float64) float64 {} -func NormMedian(loc float64, scale float64) float64 {} -func NormMoment(n int, loc float64, scale float64) float64 {} -func NormPdf(x float64, loc float64, scale float64) float64 {} -func NormPpf(p float64, loc float64, scale float64) (x float64) {} -func NormPpfRvs(loc float64, scale float64, size int) []float64 {} -func NormSf(x float64, loc float64, scale float64) float64 {} -func NormStats(loc 
float64, scale float64, moments string) []float64 {} -func NormStd(loc float64, scale float64) float64 {} -func NormVar(loc float64, scale float64) float64 {} -func Pearson(data1, data2 Float64Data) (float64, error) {} -func Percentile(input Float64Data, percent float64) (percentile float64, err error) {} -func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {} -func PopulationVariance(input Float64Data) (pvar float64, err error) {} -func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {} -func SampleVariance(input Float64Data) (svar float64, err error) {} -func Sigmoid(input Float64Data) ([]float64, error) {} -func SoftMax(input Float64Data) ([]float64, error) {} -func StableSample(input Float64Data, takenum int) ([]float64, error) {} -func StandardDeviation(input Float64Data) (sdev float64, err error) {} -func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {} -func StandardDeviationSample(input Float64Data) (sdev float64, err error) {} -func StdDevP(input Float64Data) (sdev float64, err error) {} -func StdDevS(input Float64Data) (sdev float64, err error) {} -func Sum(input Float64Data) (sum float64, err error) {} -func Trimean(input Float64Data) (float64, error) {} -func VarP(input Float64Data) (sdev float64, err error) {} -func VarS(input Float64Data) (sdev float64, err error) {} -func Variance(input Float64Data) (sdev float64, err error) {} - -type Coordinate struct { - X, Y float64 -} - -type Series []Coordinate - -func ExponentialRegression(s Series) (regressions Series, err error) {} -func LinearRegression(s Series) (regressions Series, err error) {} -func LogarithmicRegression(s Series) (regressions Series, err error) {} - -type Outliers struct { - Mild Float64Data - Extreme Float64Data -} - -type Quartiles struct { - Q1 float64 - Q2 float64 - Q3 float64 -} - -func Quartile(input Float64Data) (Quartiles, error) {} -func QuartileOutliers(input Float64Data) (Outliers, error) {} -``` - -## Contributing - -Pull requests are always welcome no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more. - -1. Fork the repo and clone your fork -2. Create new branch (`git checkout -b some-thing`) -3. Make the desired changes -4. Ensure tests pass (`go test -cover` or `make test`) -5. Run lint and fix problems (`go vet .` or `make lint`) -6. Commit changes (`git commit -am 'Did something'`) -7. Push branch (`git push origin some-thing`) -8. Submit pull request - -To make things as seamless as possible, please also consider the following steps: - -- Update `examples/main.go` with a simple example of the new feature -- Update `README.md` documentation section with any new exported API -- Keep 100% code coverage (you can check with `make coverage`) -- Squash commits into single units of work with `git rebase -i new-feature` - -## Releasing - -To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md).
- -First install the tools used to generate the markdown files: - -``` -go get github.com/davecheney/godoc2md -go get github.com/golangci/golangci-lint/cmd/golangci-lint -``` - -Then you can run these `make` directives: - -``` -# Generate DOCUMENTATION.md -make docs -``` - -Then we can create a [CHANGELOG.md](/CHANGELOG.md) entry, a new git tag, and a GitHub release: - -``` -make release TAG=v0.x.x -``` - -## MIT License - -Copyright (c) 2014-2021 Montana Flynn (https://montanaflynn.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -[travis-url]: https://travis-ci.org/montanaflynn/stats -[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg - -[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master -[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg - -[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats -[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats - -[godoc-url]: https://godoc.org/github.com/montanaflynn/stats -[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg - -[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats -[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg - -[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE -[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go deleted file mode 100644 index 4acab94..0000000 --- a/vendor/github.com/montanaflynn/stats/correlation.go +++ /dev/null @@ -1,60 +0,0 @@ -package stats - -import ( - "math" -) - -// Correlation describes the degree of relationship between two sets of data -func Correlation(data1, data2 Float64Data) (float64, error) { - - l1 := data1.Len() - l2 := data2.Len() - - if l1 == 0 || l2 == 0 { - return math.NaN(), EmptyInputErr - } - - if l1 != l2 { - return math.NaN(), SizeErr - } - - sdev1, _ := StandardDeviationPopulation(data1) - sdev2, _ := StandardDeviationPopulation(data2) - - if sdev1 == 0 || sdev2 == 0 { - return 0, nil - } - - covp, _ := CovariancePopulation(data1, data2) - return covp / (sdev1 * sdev2), nil -} - -// Pearson calculates the Pearson product-moment correlation coefficient between two variables -func Pearson(data1, data2 Float64Data) (float64, error) { - return Correlation(data1, data2) -} - -// AutoCorrelation is the correlation of a signal with a delayed
copy of itself as a function of delay -func AutoCorrelation(data Float64Data, lags int) (float64, error) { - if len(data) < 1 { - return 0, EmptyInputErr - } - - mean, _ := Mean(data) - - var result, q float64 - - for i := 0; i < lags; i++ { - v := (data[0] - mean) * (data[0] - mean) - for i := 1; i < len(data); i++ { - delta0 := data[i-1] - mean - delta1 := data[i] - mean - q += (delta0*delta1 - q) / float64(i+1) - v += (delta1*delta1 - v) / float64(i+1) - } - - result = q / v - } - - return result, nil -} diff --git a/vendor/github.com/montanaflynn/stats/cumulative_sum.go b/vendor/github.com/montanaflynn/stats/cumulative_sum.go deleted file mode 100644 index e5305da..0000000 --- a/vendor/github.com/montanaflynn/stats/cumulative_sum.go +++ /dev/null @@ -1,21 +0,0 @@ -package stats - -// CumulativeSum calculates the cumulative sum of the input slice -func CumulativeSum(input Float64Data) ([]float64, error) { - - if input.Len() == 0 { - return Float64Data{}, EmptyInput - } - - cumSum := make([]float64, input.Len()) - - for i, val := range input { - if i == 0 { - cumSum[i] = val - } else { - cumSum[i] = cumSum[i-1] + val - } - } - - return cumSum, nil -} diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go deleted file mode 100644 index b86f0d8..0000000 --- a/vendor/github.com/montanaflynn/stats/data.go +++ /dev/null @@ -1,169 +0,0 @@ -package stats - -// Float64Data is a named type for []float64 with helper methods -type Float64Data []float64 - -// Get item in slice -func (f Float64Data) Get(i int) float64 { return f[i] } - -// Len returns length of slice -func (f Float64Data) Len() int { return len(f) } - -// Less returns if one number is less than another -func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } - -// Swap switches out two numbers in slice -func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// Min returns the minimum number in the data -func (f Float64Data) Min() (float64, error) { return Min(f) } - -// Max returns the maximum number in the data -func (f Float64Data) Max() (float64, error) { return Max(f) } - -// Sum returns the total of all the numbers in the data -func (f Float64Data) Sum() (float64, error) { return Sum(f) } - -// CumulativeSum returns the cumulative sum of the data -func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) } - -// Mean returns the mean of the data -func (f Float64Data) Mean() (float64, error) { return Mean(f) } - -// Median returns the median of the data -func (f Float64Data) Median() (float64, error) { return Median(f) } - -// Mode returns the mode of the data -func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } - -// GeometricMean returns the geometric mean of the data -func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } - -// HarmonicMean returns the harmonic mean of the data -func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } - -// MedianAbsoluteDeviation the median of the absolute deviations from the dataset median -func (f Float64Data) MedianAbsoluteDeviation() (float64, error) { - return MedianAbsoluteDeviation(f) -} - -// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median -func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { - return MedianAbsoluteDeviationPopulation(f) -} - -// StandardDeviation the amount of variation in the dataset -func (f Float64Data) StandardDeviation() (float64, error) { -
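	// Note: this delegates to the package-level StandardDeviation, which in
	// turn computes the population (not sample) standard deviation; see
	// deviation.go further down in this patch.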
return StandardDeviation(f) -} - -// StandardDeviationPopulation finds the amount of variation from the population -func (f Float64Data) StandardDeviationPopulation() (float64, error) { - return StandardDeviationPopulation(f) -} - -// StandardDeviationSample finds the amount of variation from a sample -func (f Float64Data) StandardDeviationSample() (float64, error) { - return StandardDeviationSample(f) -} - -// QuartileOutliers finds the mild and extreme outliers -func (f Float64Data) QuartileOutliers() (Outliers, error) { - return QuartileOutliers(f) -} - -// Percentile finds the relative standing in a slice of floats -func (f Float64Data) Percentile(p float64) (float64, error) { - return Percentile(f, p) -} - -// PercentileNearestRank finds the relative standing using the Nearest Rank method -func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { - return PercentileNearestRank(f, p) -} - -// Correlation describes the degree of relationship between two sets of data -func (f Float64Data) Correlation(d Float64Data) (float64, error) { - return Correlation(f, d) -} - -// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay -func (f Float64Data) AutoCorrelation(lags int) (float64, error) { - return AutoCorrelation(f, lags) -} - -// Pearson calculates the Pearson product-moment correlation coefficient between two variables. -func (f Float64Data) Pearson(d Float64Data) (float64, error) { - return Pearson(f, d) -} - -// Quartile returns the three quartile points from a slice of data -func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { - return Quartile(d) -} - -// InterQuartileRange finds the range between Q1 and Q3 -func (f Float64Data) InterQuartileRange() (float64, error) { - return InterQuartileRange(f) -} - -// Midhinge finds the average of the first and third quartiles -func (f Float64Data) Midhinge(d Float64Data) (float64, error) { - return Midhinge(d) -} - -// Trimean finds the average of the median and the midhinge -func (f Float64Data) Trimean(d Float64Data) (float64, error) { - return Trimean(d) -} - -// Sample returns sample from input with replacement or without -func (f Float64Data) Sample(n int, r bool) ([]float64, error) { - return Sample(f, n, r) -} - -// Variance the amount of variation in the dataset -func (f Float64Data) Variance() (float64, error) { - return Variance(f) -} - -// PopulationVariance finds the amount of variance within a population -func (f Float64Data) PopulationVariance() (float64, error) { - return PopulationVariance(f) -} - -// SampleVariance finds the amount of variance within a sample -func (f Float64Data) SampleVariance() (float64, error) { - return SampleVariance(f) -} - -// Covariance is a measure of how much two sets of data change -func (f Float64Data) Covariance(d Float64Data) (float64, error) { - return Covariance(f, d) -} - -// CovariancePopulation computes covariance for entire population between two variables -func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { - return CovariancePopulation(f, d) -} - -// Sigmoid returns the input values along the sigmoid or s-shaped curve -func (f Float64Data) Sigmoid() ([]float64, error) { - return Sigmoid(f) -} - -// SoftMax returns the input values in the range of 0 to 1 -// with sum of all the probabilities being equal to one. 
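// Internally the inputs are shifted by their maximum before being
// exponentiated, which guards against overflow (see softmax.go).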
-func (f Float64Data) SoftMax() ([]float64, error) { - return SoftMax(f) -} - -// Entropy provides calculation of the entropy -func (f Float64Data) Entropy() (float64, error) { - return Entropy(f) -} - -// Quartiles returns the three quartile points from instance of Float64Data -func (f Float64Data) Quartiles() (Quartiles, error) { - return Quartile(f) -} diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go deleted file mode 100644 index e69a19f..0000000 --- a/vendor/github.com/montanaflynn/stats/deviation.go +++ /dev/null @@ -1,57 +0,0 @@ -package stats - -import "math" - -// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median -func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { - return MedianAbsoluteDeviationPopulation(input) -} - -// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median -func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - i := copyslice(input) - m, _ := Median(i) - - for key, value := range i { - i[key] = math.Abs(value - m) - } - - return Median(i) -} - -// StandardDeviation the amount of variation in the dataset -func StandardDeviation(input Float64Data) (sdev float64, err error) { - return StandardDeviationPopulation(input) -} - -// StandardDeviationPopulation finds the amount of variation from the population -func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the population variance - vp, _ := PopulationVariance(input) - - // Return the population standard deviation - return math.Sqrt(vp), nil -} - -// StandardDeviationSample finds the amount of variation from a sample -func StandardDeviationSample(input Float64Data) (sdev float64, err error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the sample variance - vs, _ := SampleVariance(input) - - // Return the sample standard deviation - return math.Sqrt(vs), nil -} diff --git a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go deleted file mode 100644 index c2b7d8f..0000000 --- a/vendor/github.com/montanaflynn/stats/distances.go +++ /dev/null @@ -1,88 +0,0 @@ -package stats - -import ( - "math" -) - -// Validate data for distance calculation -func validateData(dataPointX, dataPointY Float64Data) error { - if len(dataPointX) == 0 || len(dataPointY) == 0 { - return EmptyInputErr - } - - if len(dataPointX) != len(dataPointY) { - return SizeErr - } - return nil -} - -// ChebyshevDistance computes the Chebyshev distance between two data sets -func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - var tempDistance float64 - for i := 0; i < len(dataPointY); i++ { - tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) - if distance < tempDistance { - distance = tempDistance - } - } - return distance, nil -} - -// EuclideanDistance computes the Euclidean distance between two data sets -func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { - - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - distance = 0 - for i := 0; i < len(dataPointX); i++ { - distance = 
distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) - } - return math.Sqrt(distance), nil -} - -// ManhattanDistance computes the Manhattan distance between two data sets -func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - distance = 0 - for i := 0; i < len(dataPointX); i++ { - distance = distance + math.Abs(dataPointX[i]-dataPointY[i]) - } - return distance, nil -} - -// MinkowskiDistance computes the Minkowski distance between two data sets -// -// Arguments: -// dataPointX: First set of data points -// dataPointY: Second set of data points. Length of both data -// sets must be equal. -// lambda: aka p or city blocks; with lambda = 1 the -// returned distance is the Manhattan distance, with -// lambda = 2 it is the Euclidean distance, and as -// lambda approaches infinity it tends to the Chebyshev -// distance. -// Return: -// Distance or error -func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) { - err = validateData(dataPointX, dataPointY) - if err != nil { - return math.NaN(), err - } - for i := 0; i < len(dataPointY); i++ { - distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) - } - distance = math.Pow(distance, 1/lambda) - if math.IsInf(distance, 1) { - return math.NaN(), InfValue - } - return distance, nil -} diff --git a/vendor/github.com/montanaflynn/stats/doc.go b/vendor/github.com/montanaflynn/stats/doc.go deleted file mode 100644 index facb8d5..0000000 --- a/vendor/github.com/montanaflynn/stats/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Package stats is a well tested and comprehensive -statistics library package with no dependencies. - -Example Usage: - - // start with some source data to use - data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} - - // you could also use different types like this - // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) - // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) - // etc...
- - median, _ := stats.Median(data) - fmt.Println(median) // 3.65 - - roundedMedian, _ := stats.Round(median, 0) - fmt.Println(roundedMedian) // 4 - -MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) -*/ -package stats diff --git a/vendor/github.com/montanaflynn/stats/entropy.go b/vendor/github.com/montanaflynn/stats/entropy.go deleted file mode 100644 index 95263b0..0000000 --- a/vendor/github.com/montanaflynn/stats/entropy.go +++ /dev/null @@ -1,31 +0,0 @@ -package stats - -import "math" - -// Entropy provides calculation of the entropy -func Entropy(input Float64Data) (float64, error) { - input, err := normalize(input) - if err != nil { - return math.NaN(), err - } - var result float64 - for i := 0; i < input.Len(); i++ { - v := input.Get(i) - if v == 0 { - continue - } - result += (v * math.Log(v)) - } - return -result, nil -} - -func normalize(input Float64Data) (Float64Data, error) { - sum, err := input.Sum() - if err != nil { - return Float64Data{}, err - } - for i := 0; i < input.Len(); i++ { - input[i] = input[i] / sum - } - return input, nil -} diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go deleted file mode 100644 index 95f82ff..0000000 --- a/vendor/github.com/montanaflynn/stats/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -package stats - -type statsError struct { - err string -} - -func (s statsError) Error() string { - return s.err -} - -func (s statsError) String() string { - return s.err -} - -// These are the package-wide error values. -// All error identification should use these values. -// https://github.com/golang/go/wiki/Errors#naming -var ( - // ErrEmptyInput Input must not be empty - ErrEmptyInput = statsError{"Input must not be empty."} - // ErrNaN Not a number - ErrNaN = statsError{"Not a number."} - // ErrNegative Must not contain negative values - ErrNegative = statsError{"Must not contain negative values."} - // ErrZero Must not contain zero values - ErrZero = statsError{"Must not contain zero values."} - // ErrBounds Input is outside of range - ErrBounds = statsError{"Input is outside of range."} - // ErrSize Must be the same length - ErrSize = statsError{"Must be the same length."} - // ErrInfValue Value is infinite - ErrInfValue = statsError{"Value is infinite."} - // ErrYCoord Y Value must be greater than zero - ErrYCoord = statsError{"Y Value must be greater than zero."} -) diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go deleted file mode 100644 index 0f3d1e8..0000000 --- a/vendor/github.com/montanaflynn/stats/legacy.go +++ /dev/null @@ -1,49 +0,0 @@ -package stats - -// VarP is a shortcut to PopulationVariance -func VarP(input Float64Data) (sdev float64, err error) { - return PopulationVariance(input) -} - -// VarS is a shortcut to SampleVariance -func VarS(input Float64Data) (sdev float64, err error) { - return SampleVariance(input) -} - -// StdDevP is a shortcut to StandardDeviationPopulation -func StdDevP(input Float64Data) (sdev float64, err error) { - return StandardDeviationPopulation(input) -} - -// StdDevS is a shortcut to StandardDeviationSample -func StdDevS(input Float64Data) (sdev float64, err error) { - return StandardDeviationSample(input) -} - -// LinReg is a shortcut to LinearRegression -func LinReg(s []Coordinate) (regressions []Coordinate, err error) { - return LinearRegression(s) -} - -// ExpReg is a shortcut to ExponentialRegression -func ExpReg(s []Coordinate) (regressions []Coordinate, err 
error) { - return ExponentialRegression(s) -} - -// LogReg is a shortcut to LogarithmicRegression -func LogReg(s []Coordinate) (regressions []Coordinate, err error) { - return LogarithmicRegression(s) -} - -// Legacy error names that didn't start with Err -var ( - EmptyInputErr = ErrEmptyInput - NaNErr = ErrNaN - NegativeErr = ErrNegative - ZeroErr = ErrZero - BoundsErr = ErrBounds - SizeErr = ErrSize - InfValue = ErrInfValue - YCoordErr = ErrYCoord - EmptyInput = ErrEmptyInput -) diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go deleted file mode 100644 index 0eb0e27..0000000 --- a/vendor/github.com/montanaflynn/stats/load.go +++ /dev/null @@ -1,199 +0,0 @@ -package stats - -import ( - "bufio" - "io" - "strconv" - "strings" - "time" -) - -// LoadRawData parses and converts a slice of mixed data types to floats -func LoadRawData(raw interface{}) (f Float64Data) { - var r []interface{} - var s Float64Data - - switch t := raw.(type) { - case []interface{}: - r = t - case []uint: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint8: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint16: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint32: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []uint64: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []bool: - for _, v := range t { - if v { - s = append(s, 1.0) - } else { - s = append(s, 0.0) - } - } - return s - case []float64: - return Float64Data(t) - case []int: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int8: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int16: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int32: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []int64: - for _, v := range t { - s = append(s, float64(v)) - } - return s - case []string: - for _, v := range t { - r = append(r, v) - } - case []time.Duration: - for _, v := range t { - r = append(r, v) - } - case map[int]int: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int8: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int16: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int32: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]int64: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]string: - for i := 0; i < len(t); i++ { - r = append(r, t[i]) - } - case map[int]uint: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint8: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint16: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint32: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]uint64: - for i := 0; i < len(t); i++ { - s = append(s, float64(t[i])) - } - return s - case map[int]bool: - for i := 0; i < len(t); i++ { - if t[i] { - s = append(s, 1.0) - } else { - s = append(s, 0.0) - } - } - return s - case map[int]float64: - for i := 0; i < len(t); i++ { - s = append(s, t[i]) - } - return s - case map[int]time.Duration: - for i := 0; i < len(t); i++ { - r = append(r, t[i]) - 
} - case string: - for _, v := range strings.Fields(t) { - r = append(r, v) - } - case io.Reader: - scanner := bufio.NewScanner(t) - for scanner.Scan() { - l := scanner.Text() - for _, v := range strings.Fields(l) { - r = append(r, v) - } - } - } - - for _, v := range r { - switch t := v.(type) { - case int: - a := float64(t) - f = append(f, a) - case uint: - f = append(f, float64(t)) - case float64: - f = append(f, t) - case string: - fl, err := strconv.ParseFloat(t, 64) - if err == nil { - f = append(f, fl) - } - case bool: - if t { - f = append(f, 1.0) - } else { - f = append(f, 0.0) - } - case time.Duration: - f = append(f, float64(t)) - } - } - return f -} diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go deleted file mode 100644 index bb8c83c..0000000 --- a/vendor/github.com/montanaflynn/stats/max.go +++ /dev/null @@ -1,26 +0,0 @@ -package stats - -import ( - "math" -) - -// Max finds the highest number in a slice -func Max(input Float64Data) (max float64, err error) { - - // Return an error if there are no numbers - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the first value as the starting point - max = input.Get(0) - - // Loop and replace higher values - for i := 1; i < input.Len(); i++ { - if input.Get(i) > max { - max = input.Get(i) - } - } - - return max, nil -} diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go deleted file mode 100644 index a78d299..0000000 --- a/vendor/github.com/montanaflynn/stats/mean.go +++ /dev/null @@ -1,60 +0,0 @@ -package stats - -import "math" - -// Mean gets the average of a slice of numbers -func Mean(input Float64Data) (float64, error) { - - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - sum, _ := input.Sum() - - return sum / float64(input.Len()), nil -} - -// GeometricMean gets the geometric mean for a slice of numbers -func GeometricMean(input Float64Data) (float64, error) { - - l := input.Len() - if l == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the product of all the numbers - var p float64 - for _, n := range input { - if p == 0 { - p = n - } else { - p *= n - } - } - - // Calculate the geometric mean - return math.Pow(p, 1/float64(l)), nil -} - -// HarmonicMean gets the harmonic mean for a slice of numbers -func HarmonicMean(input Float64Data) (float64, error) { - - l := input.Len() - if l == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the sum of all the numbers reciprocals and return an - // error for values that cannot be included in harmonic mean - var p float64 - for _, n := range input { - if n < 0 { - return math.NaN(), NegativeErr - } else if n == 0 { - return math.NaN(), ZeroErr - } - p += (1 / n) - } - - return float64(l) / p, nil -} diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go deleted file mode 100644 index a678c36..0000000 --- a/vendor/github.com/montanaflynn/stats/median.go +++ /dev/null @@ -1,25 +0,0 @@ -package stats - -import "math" - -// Median gets the median number in a slice of numbers -func Median(input Float64Data) (median float64, err error) { - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // No math is needed if there are no numbers - // For even numbers we add the two middle numbers - // and divide by two using the mean function above - // For odd numbers we just use the middle number - l := len(c) - if l == 0 { - return math.NaN(), EmptyInputErr - } else if 
l%2 == 0 { - median, _ = Mean(c[l/2-1 : l/2+1]) - } else { - median = c[l/2] - } - - return median, nil -} diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go deleted file mode 100644 index bf7e70a..0000000 --- a/vendor/github.com/montanaflynn/stats/min.go +++ /dev/null @@ -1,26 +0,0 @@ -package stats - -import "math" - -// Min finds the lowest number in a set of data -func Min(input Float64Data) (min float64, err error) { - - // Get the count of numbers in the slice - l := input.Len() - - // Return an error if there are no numbers - if l == 0 { - return math.NaN(), EmptyInputErr - } - - // Get the first value as the starting point - min = input.Get(0) - - // Iterate until done checking for a lower value - for i := 1; i < l; i++ { - if input.Get(i) < min { - min = input.Get(i) - } - } - return min, nil -} diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go deleted file mode 100644 index a7cf9f7..0000000 --- a/vendor/github.com/montanaflynn/stats/mode.go +++ /dev/null @@ -1,47 +0,0 @@ -package stats - -// Mode gets the mode [most frequent value(s)] of a slice of float64s -func Mode(input Float64Data) (mode []float64, err error) { - // Return the input if there's only one number - l := input.Len() - if l == 1 { - return input, nil - } else if l == 0 { - return nil, EmptyInputErr - } - - c := sortedCopyDif(input) - // Traverse sorted array, - // tracking the longest repeating sequence - mode = make([]float64, 5) - cnt, maxCnt := 1, 1 - for i := 1; i < l; i++ { - switch { - case c[i] == c[i-1]: - cnt++ - case cnt == maxCnt && maxCnt != 1: - mode = append(mode, c[i-1]) - cnt = 1 - case cnt > maxCnt: - mode = append(mode[:0], c[i-1]) - maxCnt, cnt = cnt, 1 - default: - cnt = 1 - } - } - switch { - case cnt == maxCnt: - mode = append(mode, c[l-1]) - case cnt > maxCnt: - mode = append(mode[:0], c[l-1]) - maxCnt = cnt - } - - // Since length must be greater than 1, - // check for slices of distinct values - if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l { - return Float64Data{}, nil - } - - return mode, nil -} diff --git a/vendor/github.com/montanaflynn/stats/norm.go b/vendor/github.com/montanaflynn/stats/norm.go deleted file mode 100644 index 4eb8eb8..0000000 --- a/vendor/github.com/montanaflynn/stats/norm.go +++ /dev/null @@ -1,254 +0,0 @@ -package stats - -import ( - "math" - "math/rand" - "strings" - "time" -) - -// NormPpfRvs generates random variates using the Point Percentile Function. -// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ -func NormPpfRvs(loc float64, scale float64, size int) []float64 { - rand.Seed(time.Now().UnixNano()) - var toReturn []float64 - for i := 0; i < size; i++ { - toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale)) - } - return toReturn -} - -// NormBoxMullerRvs generates random variates using the Box–Muller transform. -// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html -func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 { - rand.Seed(time.Now().UnixNano()) - var toReturn []float64 - for i := 0; i < int(float64(size/2)+float64(size%2)); i++ { - // u1 and u2 are uniformly distributed random numbers between 0 and 1. - u1 := rand.Float64() - u2 := rand.Float64() - // x1 and x2 are normally distributed random numbers. 
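	// Box-Muller transform: x1 = loc + scale*sqrt(-2*ln(u1))*cos(2*pi*u2) and
	// x2 = loc + scale*sqrt(-2*ln(u1))*sin(2*pi*u2) are two independent draws
	// from a normal distribution with mean loc and standard deviation scale.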
- x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2))) - toReturn = append(toReturn, x1) - if (i+1)*2 <= size { - x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2))) - toReturn = append(toReturn, x2) - } - } - return toReturn -} - -// NormPdf is the probability density function. -func NormPdf(x float64, loc float64, scale float64) float64 { - return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)) -} - -// NormLogPdf is the log of the probability density function. -func NormLogPdf(x float64, loc float64, scale float64) float64 { - return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))) -} - -// NormCdf is the cumulative distribution function. -func NormCdf(x float64, loc float64, scale float64) float64 { - return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))) -} - -// NormLogCdf is the log of the cumulative distribution function. -func NormLogCdf(x float64, loc float64, scale float64) float64 { - return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))) -} - -// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). -func NormSf(x float64, loc float64, scale float64) float64 { - return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))) -} - -// NormLogSf is the log of the survival function. -func NormLogSf(x float64, loc float64, scale float64) float64 { - return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))) -} - -// NormPpf is the point percentile function. -// This is based on Peter John Acklam's inverse normal CDF. -// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). -// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ -func NormPpf(p float64, loc float64, scale float64) (x float64) { - const ( - a1 = -3.969683028665376e+01 - a2 = 2.209460984245205e+02 - a3 = -2.759285104469687e+02 - a4 = 1.383577518672690e+02 - a5 = -3.066479806614716e+01 - a6 = 2.506628277459239e+00 - - b1 = -5.447609879822406e+01 - b2 = 1.615858368580409e+02 - b3 = -1.556989798598866e+02 - b4 = 6.680131188771972e+01 - b5 = -1.328068155288572e+01 - - c1 = -7.784894002430293e-03 - c2 = -3.223964580411365e-01 - c3 = -2.400758277161838e+00 - c4 = -2.549732539343734e+00 - c5 = 4.374664141464968e+00 - c6 = 2.938163982698783e+00 - - d1 = 7.784695709041462e-03 - d2 = 3.224671290700398e-01 - d3 = 2.445134137142996e+00 - d4 = 3.754408661907416e+00 - - plow = 0.02425 - phigh = 1 - plow - ) - - if p < 0 || p > 1 { - return math.NaN() - } else if p == 0 { - return -math.Inf(0) - } else if p == 1 { - return math.Inf(0) - } - - if p < plow { - q := math.Sqrt(-2 * math.Log(p)) - x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / - ((((d1*q+d2)*q+d3)*q+d4)*q + 1) - } else if phigh < p { - q := math.Sqrt(-2 * math.Log(1-p)) - x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / - ((((d1*q+d2)*q+d3)*q+d4)*q + 1) - } else { - q := p - 0.5 - r := q * q - x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q / - (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1) - } - - e := 0.5*math.Erfc(-x/math.Sqrt2) - p - u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2) - x = x - u/(1+x*u/2) - - return x*scale + loc -} - -// NormIsf is the inverse survival function (inverse of sf). 
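// Note that it is implemented as the negation of NormPpf, which coincides
// with the exact inverse of NormSf when loc is zero.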
-func NormIsf(p float64, loc float64, scale float64) (x float64) { - if -NormPpf(p, loc, scale) == 0 { - return 0 - } - return -NormPpf(p, loc, scale) -} - -// NormMoment approximates the non-central (raw) moment of order n. -// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution -func NormMoment(n int, loc float64, scale float64) float64 { - toReturn := 0.0 - for i := 0; i < n+1; i++ { - if (n-i)%2 == 0 { - toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) * - (float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) * - float64(factorial((n-i)/2)))) - } - } - return toReturn -} - -// NormStats returns the mean, variance, skew, and/or kurtosis. -// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). -// Takes string containing any of 'mvsk'. -// Returns array of m v s k in that order. -func NormStats(loc float64, scale float64, moments string) []float64 { - var toReturn []float64 - if strings.ContainsAny(moments, "m") { - toReturn = append(toReturn, loc) - } - if strings.ContainsAny(moments, "v") { - toReturn = append(toReturn, math.Pow(scale, 2)) - } - if strings.ContainsAny(moments, "s") { - toReturn = append(toReturn, 0.0) - } - if strings.ContainsAny(moments, "k") { - toReturn = append(toReturn, 0.0) - } - return toReturn -} - -// NormEntropy is the differential entropy of the RV. -func NormEntropy(loc float64, scale float64) float64 { - return math.Log(scale * math.Sqrt(2*math.Pi*math.E)) -} - -// NormFit returns the maximum likelihood estimators for the Normal Distribution. -// Takes array of float64 values. -// Returns array of Mean followed by Standard Deviation. -func NormFit(data []float64) [2]float64 { - sum := 0.00 - for i := 0; i < len(data); i++ { - sum += data[i] - } - mean := sum / float64(len(data)) - stdNumerator := 0.00 - for i := 0; i < len(data); i++ { - stdNumerator += math.Pow(data[i]-mean, 2) - } - return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))} -} - -// NormMedian is the median of the distribution. -func NormMedian(loc float64, scale float64) float64 { - return loc -} - -// NormMean is the mean/expected value of the distribution. -func NormMean(loc float64, scale float64) float64 { - return loc -} - -// NormVar is the variance of the distribution. -func NormVar(loc float64, scale float64) float64 { - return math.Pow(scale, 2) -} - -// NormStd is the standard deviation of the distribution. -func NormStd(loc float64, scale float64) float64 { - return scale -} - -// NormInterval finds endpoints of the range that contains alpha percent of the distribution. -func NormInterval(alpha float64, loc float64, scale float64) [2]float64 { - q1 := (1.0 - alpha) / 2 - q2 := (1.0 + alpha) / 2 - a := NormPpf(q1, loc, scale) - b := NormPpf(q2, loc, scale) - return [2]float64{a, b} -} - -// factorial is the naive factorial algorithm. -func factorial(x int) int { - if x == 0 { - return 1 - } - return x * factorial(x-1) -} - -// Ncr is an N choose R algorithm. -// Aaron Cannon's algorithm. 
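// It computes the binomial coefficient C(n, r), multiplying and dividing
// incrementally so that intermediate results stay small.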
-func Ncr(n, r int) int { - if n <= 1 || r == 0 || n == r { - return 1 - } - if newR := n - r; newR < r { - r = newR - } - if r == 1 { - return n - } - ret := int(n - r + 1) - for i, j := ret+1, int(2); j <= r; i, j = i+1, j+1 { - ret = ret * i / j - } - return ret -} diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go deleted file mode 100644 index 7c9795b..0000000 --- a/vendor/github.com/montanaflynn/stats/outlier.go +++ /dev/null @@ -1,44 +0,0 @@ -package stats - -// Outliers holds mild and extreme outliers found in data -type Outliers struct { - Mild Float64Data - Extreme Float64Data -} - -// QuartileOutliers finds the mild and extreme outliers -func QuartileOutliers(input Float64Data) (Outliers, error) { - if input.Len() == 0 { - return Outliers{}, EmptyInputErr - } - - // Start by sorting a copy of the slice - copy := sortedCopy(input) - - // Calculate the quartiles and interquartile range - qs, _ := Quartile(copy) - iqr, _ := InterQuartileRange(copy) - - // Calculate the lower and upper inner and outer fences - lif := qs.Q1 - (1.5 * iqr) - uif := qs.Q3 + (1.5 * iqr) - lof := qs.Q1 - (3 * iqr) - uof := qs.Q3 + (3 * iqr) - - // Find the data points that are outside of the - // inner and outer fences and add them to mild - // and extreme outlier slices - var mild Float64Data - var extreme Float64Data - for _, v := range copy { - - if v < lof || v > uof { - extreme = append(extreme, v) - } else if v < lif || v > uif { - mild = append(mild, v) - } - } - - // Wrap them into our struct - return Outliers{mild, extreme}, nil -} diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go deleted file mode 100644 index f564178..0000000 --- a/vendor/github.com/montanaflynn/stats/percentile.go +++ /dev/null @@ -1,86 +0,0 @@ -package stats - -import ( - "math" -) - -// Percentile finds the relative standing in a slice of floats -func Percentile(input Float64Data, percent float64) (percentile float64, err error) { - length := input.Len() - if length == 0 { - return math.NaN(), EmptyInputErr - } - - if length == 1 { - return input[0], nil - } - - if percent <= 0 || percent > 100 { - return math.NaN(), BoundsErr - } - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // Multiply percent by length of input - index := (percent / 100) * float64(len(c)) - - // Check if the index is a whole number - if index == float64(int64(index)) { - - // Convert float to int - i := int(index) - - // Find the value at the index - percentile = c[i-1] - - } else if index > 1 { - - // Convert float to int via truncation - i := int(index) - - // Find the average of the index and following values - percentile, _ = Mean(Float64Data{c[i-1], c[i]}) - - } else { - return math.NaN(), BoundsErr - } - - return percentile, nil - -} - -// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method -func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { - - // Find the length of items in the slice - il := input.Len() - - // Return an error for empty slices - if il == 0 { - return math.NaN(), EmptyInputErr - } - - // Return error for less than 0 or greater than 100 percentages - if percent < 0 || percent > 100 { - return math.NaN(), BoundsErr - } - - // Start by sorting a copy of the slice - c := sortedCopy(input) - - // Return the last item - if percent == 100.0 { - return c[il-1], nil - } - - // Find ordinal ranking
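	// (nearest-rank method: or = ceil(il * percent / 100), used below as a
	// 1-based index into the sorted copy)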
- or := int(math.Ceil(float64(il) * percent / 100)) - - // Return the item that is in the place of the ordinal rank - if or == 0 { - return c[0], nil - } - return c[or-1], nil - -} diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go deleted file mode 100644 index 40bbf6e..0000000 --- a/vendor/github.com/montanaflynn/stats/quartile.go +++ /dev/null @@ -1,74 +0,0 @@ -package stats - -import "math" - -// Quartiles holds the three quartile points -type Quartiles struct { - Q1 float64 - Q2 float64 - Q3 float64 -} - -// Quartile returns the three quartile points from a slice of data -func Quartile(input Float64Data) (Quartiles, error) { - - il := input.Len() - if il == 0 { - return Quartiles{}, EmptyInputErr - } - - // Start by sorting a copy of the slice - copy := sortedCopy(input) - - // Find the cutoff places depending on whether - // the input slice length is even or odd - var c1 int - var c2 int - if il%2 == 0 { - c1 = il / 2 - c2 = il / 2 - } else { - c1 = (il - 1) / 2 - c2 = c1 + 1 - } - - // Find the Medians with the cutoff points - Q1, _ := Median(copy[:c1]) - Q2, _ := Median(copy) - Q3, _ := Median(copy[c2:]) - - return Quartiles{Q1, Q2, Q3}, nil - -} - -// InterQuartileRange finds the range between Q1 and Q3 -func InterQuartileRange(input Float64Data) (float64, error) { - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - qs, _ := Quartile(input) - iqr := qs.Q3 - qs.Q1 - return iqr, nil -} - -// Midhinge finds the average of the first and third quartiles -func Midhinge(input Float64Data) (float64, error) { - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - qs, _ := Quartile(input) - mh := (qs.Q1 + qs.Q3) / 2 - return mh, nil -} - -// Trimean finds the average of the median and the midhinge -func Trimean(input Float64Data) (float64, error) { - if input.Len() == 0 { - return math.NaN(), EmptyInputErr - } - - c := sortedCopy(input) - q, _ := Quartile(c) - - return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil -} diff --git a/vendor/github.com/montanaflynn/stats/ranksum.go b/vendor/github.com/montanaflynn/stats/ranksum.go deleted file mode 100644 index fc424ef..0000000 --- a/vendor/github.com/montanaflynn/stats/ranksum.go +++ /dev/null @@ -1,183 +0,0 @@ -package stats - -// import "math" -// -// // WilcoxonRankSum tests the null hypothesis that two sets -// // of data are drawn from the same distribution. It does -// // not handle ties between measurements in x and y. -// // -// // Parameters: -// // data1 Float64Data: First set of data points. -// // data2 Float64Data: Second set of data points. -// // Length of both data samples must be equal. -// // -// // Return: -// // statistic float64: The test statistic under the -// // large-sample approximation that the -// // rank sum statistic is normally distributed. -// // pvalue float64: The two-sided p-value of the test -// // err error: Any error from the input data parameters -// // -// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test -// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) { -// -// l1 := data1.Len() -// l2 := data2.Len() -// -// if l1 == 0 || l2 == 0 { -// return math.NaN(), math.NaN(), EmptyInputErr -// } -// -// if l1 != l2 { -// return math.NaN(), math.NaN(), SizeErr -// } -// -// alldata := Float64Data{} -// alldata = append(alldata, data1...) -// alldata = append(alldata, data2...)
-// -// // ranked := -// -// return 0.0, 0.0, nil -// } -// -// // x, y = map(np.asarray, (x, y)) -// // n1 = len(x) -// // n2 = len(y) -// // alldata = np.concatenate((x, y)) -// // ranked = rankdata(alldata) -// // x = ranked[:n1] -// // s = np.sum(x, axis=0) -// // expected = n1 * (n1+n2+1) / 2.0 -// // z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) -// // prob = 2 * distributions.norm.sf(abs(z)) -// // -// // return RanksumsResult(z, prob) -// -// // def rankdata(a, method='average'): -// // """ -// // Assign ranks to data, dealing with ties appropriately. -// // Ranks begin at 1. The `method` argument controls how ranks are assigned -// // to equal values. See [1]_ for further discussion of ranking methods. -// // Parameters -// // ---------- -// // a : array_like -// // The array of values to be ranked. The array is first flattened. -// // method : str, optional -// // The method used to assign ranks to tied elements. -// // The options are 'average', 'min', 'max', 'dense' and 'ordinal'. -// // 'average': -// // The average of the ranks that would have been assigned to -// // all the tied values is assigned to each value. -// // 'min': -// // The minimum of the ranks that would have been assigned to all -// // the tied values is assigned to each value. (This is also -// // referred to as "competition" ranking.) -// // 'max': -// // The maximum of the ranks that would have been assigned to all -// // the tied values is assigned to each value. -// // 'dense': -// // Like 'min', but the rank of the next highest element is assigned -// // the rank immediately after those assigned to the tied elements. -// // 'ordinal': -// // All values are given a distinct rank, corresponding to the order -// // that the values occur in `a`. -// // The default is 'average'. -// // Returns -// // ------- -// // ranks : ndarray -// // An array of length equal to the size of `a`, containing rank -// // scores. -// // References -// // ---------- -// // .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking -// // Examples -// // -------- -// // >>> from scipy.stats import rankdata -// // >>> rankdata([0, 2, 3, 2]) -// // array([ 1. , 2.5, 4. 
, 2.5]) -// // """ -// // -// // arr = np.ravel(np.asarray(a)) -// // algo = 'quicksort' -// // sorter = np.argsort(arr, kind=algo) -// // -// // inv = np.empty(sorter.size, dtype=np.intp) -// // inv[sorter] = np.arange(sorter.size, dtype=np.intp) -// // -// // -// // arr = arr[sorter] -// // obs = np.r_[True, arr[1:] != arr[:-1]] -// // dense = obs.cumsum()[inv] -// // -// // -// // # cumulative counts of each unique value -// // count = np.r_[np.nonzero(obs)[0], len(obs)] -// // -// // # average method -// // return .5 * (count[dense] + count[dense - 1] + 1) -// -// type rankable interface { -// Len() int -// RankEqual(int, int) bool -// } -// -// func StandardRank(d rankable) []float64 { -// r := make([]float64, d.Len()) -// var k int -// for i := range r { -// if i == 0 || !d.RankEqual(i, i-1) { -// k = i + 1 -// } -// r[i] = float64(k) -// } -// return r -// } -// -// func ModifiedRank(d rankable) []float64 { -// r := make([]float64, d.Len()) -// for i := range r { -// k := i + 1 -// for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ { -// k = j + 1 -// } -// r[i] = float64(k) -// } -// return r -// } -// -// func DenseRank(d rankable) []float64 { -// r := make([]float64, d.Len()) -// var k int -// for i := range r { -// if i == 0 || !d.RankEqual(i, i-1) { -// k++ -// } -// r[i] = float64(k) -// } -// return r -// } -// -// func OrdinalRank(d rankable) []float64 { -// r := make([]float64, d.Len()) -// for i := range r { -// r[i] = float64(i + 1) -// } -// return r -// } -// -// func FractionalRank(d rankable) []float64 { -// r := make([]float64, d.Len()) -// for i := 0; i < len(r); { -// var j int -// f := float64(i + 1) -// for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ { -// f += float64(j + 1) -// } -// f /= float64(j - i) -// for ; i < j; i++ { -// r[i] = f -// } -// } -// return r -// } diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go deleted file mode 100644 index 401d951..0000000 --- a/vendor/github.com/montanaflynn/stats/regression.go +++ /dev/null @@ -1,113 +0,0 @@ -package stats - -import "math" - -// Series is a container for a series of data -type Series []Coordinate - -// Coordinate holds the data in a series -type Coordinate struct { - X, Y float64 -} - -// LinearRegression finds the least squares linear regression on data series -func LinearRegression(s Series) (regressions Series, err error) { - - if len(s) == 0 { - return nil, EmptyInputErr - } - - // Placeholder for the math to be done - var sum [5]float64 - - // Loop over data keeping index in place - i := 0 - for ; i < len(s); i++ { - sum[0] += s[i].X - sum[1] += s[i].Y - sum[2] += s[i].X * s[i].X - sum[3] += s[i].X * s[i].Y - sum[4] += s[i].Y * s[i].Y - } - - // Find gradient and intercept - f := float64(i) - gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) - intercept := (sum[1] / f) - (gradient * sum[0] / f) - - // Create the new regression series - for j := 0; j < len(s); j++ { - regressions = append(regressions, Coordinate{ - X: s[j].X, - Y: s[j].X*gradient + intercept, - }) - } - - return regressions, nil -} - -// ExponentialRegression returns an exponential regression on data series -func ExponentialRegression(s Series) (regressions Series, err error) { - - if len(s) == 0 { - return nil, EmptyInputErr - } - - var sum [6]float64 - - for i := 0; i < len(s); i++ { - if s[i].Y < 0 { - return nil, YCoordErr - } - sum[0] += s[i].X - sum[1] += s[i].Y - sum[2] += s[i].X * s[i].X * s[i].Y - sum[3] += s[i].Y * 
math.Log(s[i].Y)
-		sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y)
-		sum[5] += s[i].X * s[i].Y
-	}
-
-	denominator := (sum[1]*sum[2] - sum[5]*sum[5])
-	a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator)
-	b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator
-
-	for j := 0; j < len(s); j++ {
-		regressions = append(regressions, Coordinate{
-			X: s[j].X,
-			Y: a * math.Exp(b*s[j].X),
-		})
-	}
-
-	return regressions, nil
-}
-
-// LogarithmicRegression returns a logarithmic regression on data series
-func LogarithmicRegression(s Series) (regressions Series, err error) {
-
-	if len(s) == 0 {
-		return nil, EmptyInputErr
-	}
-
-	var sum [4]float64
-
-	i := 0
-	for ; i < len(s); i++ {
-		sum[0] += math.Log(s[i].X)
-		sum[1] += s[i].Y * math.Log(s[i].X)
-		sum[2] += s[i].Y
-		sum[3] += math.Pow(math.Log(s[i].X), 2)
-	}
-
-	f := float64(i)
-	a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0])
-	b := (sum[2] - a*sum[0]) / f
-
-	for j := 0; j < len(s); j++ {
-		regressions = append(regressions, Coordinate{
-			X: s[j].X,
-			Y: b + a*math.Log(s[j].X),
-		})
-	}
-
-	return regressions, nil
-}
diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go
deleted file mode 100644
index b66779c..0000000
--- a/vendor/github.com/montanaflynn/stats/round.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package stats
-
-import "math"
-
-// Round rounds a float to a specific decimal place or precision
-func Round(input float64, places int) (rounded float64, err error) {
-
-	// If the float is not a number
-	if math.IsNaN(input) {
-		return math.NaN(), NaNErr
-	}
-
-	// Find out the actual sign and correct the input for later
-	sign := 1.0
-	if input < 0 {
-		sign = -1
-		input *= -1
-	}
-
-	// Use the places arg to get the amount of precision wanted
-	precision := math.Pow(10, float64(places))
-
-	// Find the decimal place we are looking to round
-	digit := input * precision
-
-	// Get the actual decimal number as a fraction to be compared
-	_, decimal := math.Modf(digit)
-
-	// If the decimal is 0.5 or greater we round up, otherwise down
-	if decimal >= 0.5 {
-		rounded = math.Ceil(digit)
-	} else {
-		rounded = math.Floor(digit)
-	}
-
-	// Finally we do the math to actually create a rounded number
-	return rounded / precision * sign, nil
-}
diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go
deleted file mode 100644
index 40166af..0000000
--- a/vendor/github.com/montanaflynn/stats/sample.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package stats
-
-import (
-	"math/rand"
-	"sort"
-)
-
-// Sample returns a sample from the input, with or without replacement
-func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {
-
-	if input.Len() == 0 {
-		return nil, EmptyInputErr
-	}
-
-	length := input.Len()
-	if replacement {
-
-		result := Float64Data{}
-		rand.Seed(unixnano())
-
-		// On every step, randomly pick one element from the input
-		for i := 0; i < takenum; i++ {
-			idx := rand.Intn(length)
-			result = append(result, input[idx])
-		}
-
-		return result, nil
-
-	} else if !replacement && takenum <= length {
-
-		rand.Seed(unixnano())
-
-		// Get a permutation of the indices
-		perm := rand.Perm(length)
-		result := Float64Data{}
-
-		// Take elements of the input at the permuted indices
-		for _, idx := range perm[0:takenum] {
-			result = append(result, input[idx])
-		}
-
-		return result, nil
-
-	}
-
-	return nil, BoundsErr
-}
-
-// StableSample, like stable sort, returns samples from the input while keeping the order of the original data.
-func StableSample(input Float64Data, takenum int) ([]float64, error) {
-	if input.Len() == 0 {
-		return nil, EmptyInputErr
-	}
-
-	length := input.Len()
-
-	if takenum <= length {
-
-		rand.Seed(unixnano())
-
-		perm := rand.Perm(length)
-		perm = perm[0:takenum]
-		// Sort perm before applying
-		sort.Ints(perm)
-		result := Float64Data{}
-
-		for _, idx := range perm {
-			result = append(result, input[idx])
-		}
-
-		return result, nil
-
-	}
-
-	return nil, BoundsErr
-}
diff --git a/vendor/github.com/montanaflynn/stats/sigmoid.go b/vendor/github.com/montanaflynn/stats/sigmoid.go
deleted file mode 100644
index 5f2559d..0000000
--- a/vendor/github.com/montanaflynn/stats/sigmoid.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package stats
-
-import "math"
-
-// Sigmoid returns the input values mapped into the range of 0 to 1
-// along the sigmoid or s-shaped curve, commonly used in
-// machine learning while training neural networks as an
-// activation function.
-func Sigmoid(input Float64Data) ([]float64, error) {
-	if input.Len() == 0 {
-		return Float64Data{}, EmptyInput
-	}
-	s := make([]float64, len(input))
-	for i, v := range input {
-		s[i] = 1 / (1 + math.Exp(-v))
-	}
-	return s, nil
-}
diff --git a/vendor/github.com/montanaflynn/stats/softmax.go b/vendor/github.com/montanaflynn/stats/softmax.go
deleted file mode 100644
index 8507264..0000000
--- a/vendor/github.com/montanaflynn/stats/softmax.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package stats
-
-import "math"
-
-// SoftMax returns the input values in the range of 0 to 1
-// with the sum of all the probabilities equal to one. It
-// is commonly used in machine learning neural networks.
-func SoftMax(input Float64Data) ([]float64, error) {
-	if input.Len() == 0 {
-		return Float64Data{}, EmptyInput
-	}
-
-	s := 0.0
-	c, _ := Max(input)
-	for _, e := range input {
-		s += math.Exp(e - c)
-	}
-
-	sm := make([]float64, len(input))
-	for i, v := range input {
-		sm[i] = math.Exp(v-c) / s
-	}
-
-	return sm, nil
-}
diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go
deleted file mode 100644
index 15b611d..0000000
--- a/vendor/github.com/montanaflynn/stats/sum.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package stats
-
-import "math"
-
-// Sum adds all the numbers of a slice together
-func Sum(input Float64Data) (sum float64, err error) {
-
-	if input.Len() == 0 {
-		return math.NaN(), EmptyInputErr
-	}
-
-	// Add em up
-	for _, n := range input {
-		sum += n
-	}
-
-	return sum, nil
-}
diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go
deleted file mode 100644
index 8819976..0000000
--- a/vendor/github.com/montanaflynn/stats/util.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package stats
-
-import (
-	"sort"
-	"time"
-)
-
-// float64ToInt rounds a float64 to an int
-func float64ToInt(input float64) (output int) {
-	r, _ := Round(input, 0)
-	return int(r)
-}
-
-// unixnano returns nanoseconds from the UTC epoch
-func unixnano() int64 {
-	return time.Now().UTC().UnixNano()
-}
-
-// copyslice copies a slice of float64s
-func copyslice(input Float64Data) Float64Data {
-	s := make(Float64Data, input.Len())
-	copy(s, input)
-	return s
-}
-
-// sortedCopy returns a sorted copy of float64s
-func sortedCopy(input Float64Data) (copy Float64Data) {
-	copy = copyslice(input)
-	sort.Float64s(copy)
-	return
-}
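The Sigmoid, SoftMax and Sum helpers deleted above are typical of the consumer-facing API that callers will now pull from the module cache instead of this vendor copy. A minimal usage sketch, assuming only the stats API exactly as it appears in the deleted sources (printed values truncated):

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	scores := stats.Float64Data{1, 2, 3}

	// Sigmoid squashes each value independently into (0, 1).
	sig, _ := stats.Sigmoid(scores)
	fmt.Println(sig) // [0.7310... 0.8807... 0.9525...]

	// SoftMax rescales the values into a probability distribution.
	sm, _ := stats.SoftMax(scores)
	total, _ := stats.Sum(sm)
	fmt.Println(sm, total) // probabilities that sum to 1
}
```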
-
-// sortedCopyDif returns a sorted copy of float64s
-// only if the original data isn't already sorted.
-// Only use this if the returned slice won't be manipulated!
-func sortedCopyDif(input Float64Data) (copy Float64Data) {
-	if sort.Float64sAreSorted(input) {
-		return input
-	}
-	copy = copyslice(input)
-	sort.Float64s(copy)
-	return
-}
diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go
deleted file mode 100644
index a644569..0000000
--- a/vendor/github.com/montanaflynn/stats/variance.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package stats
-
-import "math"
-
-// _variance finds the variance for both population and sample data
-func _variance(input Float64Data, sample int) (variance float64, err error) {
-
-	if input.Len() == 0 {
-		return math.NaN(), EmptyInputErr
-	}
-
-	// Sum the squares of the differences from the mean
-	m, _ := Mean(input)
-
-	for _, n := range input {
-		variance += (n - m) * (n - m)
-	}
-
-	// When taking the mean of the squared differences,
-	// "sample" tells us whether this is a sample or a
-	// population and whether to subtract one or not
-	return variance / float64((input.Len() - (1 * sample))), nil
-}
-
-// Variance finds the amount of variation in the dataset
-func Variance(input Float64Data) (sdev float64, err error) {
-	return PopulationVariance(input)
-}
-
-// PopulationVariance finds the amount of variance within a population
-func PopulationVariance(input Float64Data) (pvar float64, err error) {
-
-	v, err := _variance(input, 0)
-	if err != nil {
-		return math.NaN(), err
-	}
-
-	return v, nil
-}
-
-// SampleVariance finds the amount of variance within a sample
-func SampleVariance(input Float64Data) (svar float64, err error) {
-
-	v, err := _variance(input, 1)
-	if err != nil {
-		return math.NaN(), err
-	}
-
-	return v, nil
-}
-
-// Covariance is a measure of how much two sets of data change together
-func Covariance(data1, data2 Float64Data) (float64, error) {
-
-	l1 := data1.Len()
-	l2 := data2.Len()
-
-	if l1 == 0 || l2 == 0 {
-		return math.NaN(), EmptyInputErr
-	}
-
-	if l1 != l2 {
-		return math.NaN(), SizeErr
-	}
-
-	m1, _ := Mean(data1)
-	m2, _ := Mean(data2)
-
-	// Calculate a running sum of squares
-	var ss float64
-	for i := 0; i < l1; i++ {
-		delta1 := (data1.Get(i) - m1)
-		delta2 := (data2.Get(i) - m2)
-		ss += (delta1*delta2 - ss) / float64(i+1)
-	}
-
-	return ss * float64(l1) / float64(l1-1), nil
-}
-
-// CovariancePopulation computes the covariance between two variables over the entire population.
-func CovariancePopulation(data1, data2 Float64Data) (float64, error) { - - l1 := data1.Len() - l2 := data2.Len() - - if l1 == 0 || l2 == 0 { - return math.NaN(), EmptyInputErr - } - - if l1 != l2 { - return math.NaN(), SizeErr - } - - m1, _ := Mean(data1) - m2, _ := Mean(data2) - - var s float64 - for i := 0; i < l1; i++ { - delta1 := (data1.Get(i) - m1) - delta2 := (data2.Get(i) - m2) - s += delta1 * delta2 - } - - return s / float64(l1), nil -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de0..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
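Likewise for the regression and covariance code above: after this patch, consumers resolve montanaflynn/stats through Go modules rather than the vendor tree. A minimal sketch of the removed API, assuming it behaves exactly as shown in the deleted sources:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Least-squares fit of y = gradient*x + intercept over a small series.
	series := stats.Series{{X: 1, Y: 2.3}, {X: 2, Y: 3.3}, {X: 3, Y: 3.7}}
	fitted, err := stats.LinearRegression(series)
	if err != nil {
		panic(err)
	}
	fmt.Println(fitted) // the predicted Y at each input X

	// Sample covariance of two equal-length datasets.
	cov, _ := stats.Covariance(
		stats.Float64Data{1, 2, 3, 4},
		stats.Float64Data{2, 4, 6, 8},
	)
	fmt.Println(cov)
}
```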
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cd..0000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 
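The Wrap/Cause round trip the README describes, as a runnable sketch; the `readConfig` helper and the file path are illustrative only:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readConfig stands in for any failing operation worth annotating.
func readConfig(path string) ([]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		// Attach context and a stack trace to the underlying error.
		return nil, errors.Wrap(err, "read config failed")
	}
	return b, nil
}

func main() {
	_, err := readConfig("/does/not/exist.yaml")

	// The wrapped message prefixes the original error text.
	fmt.Println(err)

	// Cause unwinds the wrapping to recover the original *os.PathError.
	fmt.Printf("%T\n", errors.Cause(err))

	// %+v additionally prints the recorded stack trace.
	fmt.Printf("%+v\n", err)
}
```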
- -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. 
For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. 
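The doc comment above describes Wrap as message plus stack trace, and WithMessage/WithStack as its two component operations (the Wrap implementation itself continues just below). A minimal sketch of that equivalence; the error values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	base := errors.New("foo failed") // records a stack at this call site

	// Wrap = WithMessage + WithStack in a single step.
	wrapped := errors.Wrap(base, "handling request")

	// The same annotation built from the two primitives.
	composed := errors.WithStack(errors.WithMessage(base, "handling request"))

	fmt.Println(wrapped)  // handling request: foo failed
	fmt.Println(composed) // handling request: foo failed

	// Both unwind to the same original error.
	fmt.Println(errors.Cause(wrapped) == errors.Cause(composed)) // true
}
```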
-func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d..0000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. 
-// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a834..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. 
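Before the StackTrace formatting implementation that follows, a runnable sketch tying together the stackTracer interface documented earlier, Frame's `%+s:%d` verbs, and the Go 1.13 interop from go113.go; the local stackTracer mirror is ours, since the package deliberately keeps its copy unexported:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface described in the package docs.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom") // New records a stack trace

	// Print each recorded frame as "function\n\tfile:line".
	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			fmt.Printf("%+s:%d\n", f, f)
		}
	}

	// Go 1.13 interop: Is sees through pkg/errors wrappers via Unwrap.
	wrapped := errors.Wrap(err, "context")
	fmt.Println(errors.Is(wrapped, err)) // true
}
```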
-func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 1fb13ab..0000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -logrus -vendor - -.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml deleted file mode 100644 index 65dc285..0000000 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ /dev/null @@ -1,40 +0,0 @@ -run: - # do not run on test files yet - tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. 
-    check-blank: false
-
-  lll:
-    line-length: 100
-    tab-width: 4
-
-  prealloc:
-    simple: false
-    range-loops: false
-    for-loops: false
-
-  whitespace:
-    multi-if: false   # Enforces newlines (or comments) after every multi-line if statement
-    multi-func: false # Enforces newlines (or comments) after every multi-line function signature
-
-linters:
-  enable:
-    - megacheck
-    - govet
-  disable:
-    - maligned
-    - prealloc
-  disable-all: false
-  presets:
-    - bugs
-    - unused
-  fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
deleted file mode 100644
index c1dbd5a..0000000
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go_import_path: github.com/sirupsen/logrus
-git:
-  depth: 1
-env:
-  - GO111MODULE=on
-go: 1.15.x
-os: linux
-install:
-  - ./travis/install.sh
-script:
-  - cd ci
-  - go run mage.go -v -w ../ crossBuild
-  - go run mage.go -v -w ../ lint
-  - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index 7567f61..0000000
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,259 +0,0 @@
-# 1.8.1
-Code quality:
-  * move the magefile into its own subdir/submodule to remove the magefile dependency on logrus consumers
-  * improve timestamp format documentation
-
-Fixes:
-  * fix race condition on logger hooks
-
-
-# 1.8.0
-
-Correct versioning number replacing v1.7.1.
-
-# 1.7.1
-
-Beware: this release has introduced a new public API and its semver is therefore incorrect.
-
-Code quality:
-  * use go 1.15 in travis
-  * use magefile as task runner
-
-Fixes:
-  * small fixes for the new go 1.13 error formatting system
-  * fix a long-standing race condition with mutating data hooks
-
-Features:
-  * build support for zos
-
-# 1.7.0
-Fixes:
-  * the dependency on a windows terminal library has been removed
-
-Features:
-  * a new buffer pool management API has been added
-  * a set of `Fn()` functions have been added
-
-# 1.6.0
-Fixes:
-  * end of line cleanup
-  * revert the entry concurrency bug fix which leads to deadlock under some circumstances
-  * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
-
-Features:
-  * add an option to the `TextFormatter` to completely disable field quoting
-
-# 1.5.0
-Code quality:
-  * add golangci linter run on travis
-
-Fixes:
-  * add mutex for hooks' concurrent access to `Entry` data
-  * caller function field for go1.14
-  * fix build issue for gopherjs target
-
-Feature:
-  * add a hooks/writer sub-package whose goal is to split output onto different streams depending on the trace level
-  * add a `DisableHTMLEscape` option in the `JSONFormatter`
-  * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
-
-# 1.4.2
-  * Fixes build break for plan9, nacl, solaris
-# 1.4.1
-This new release introduces:
-  * Enhance TextFormatter to not print caller information when it is empty (#944)
-  * Remove dependency on golang.org/x/crypto (#932, #943)
-
-Fixes:
-  * Fix Entry.WithContext method to return a copy of the initial entry (#941)
-
-# 1.4.0
-This new release introduces:
-  * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
-  * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
-  * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
-
-Fixes:
-  * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
-  * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
-  * Fix infinite recursion on unknown `Level.String()` (#907)
-  * Fix race condition in `getCaller` (#916).
-
-
-# 1.3.0
-This new release introduces:
-  * Log, Logf, Logln functions for Logger and Entry that take a Level
-
-Fixes:
-  * Building prometheus node_exporter on AIX (#840)
-  * Race condition in TextFormatter (#468)
-  * Travis CI import path (#868)
-  * Remove coloured output on Windows (#862)
-  * Pointer to func as field in JSONFormatter (#870)
-  * Properly marshal Levels (#873)
-
-# 1.2.0
-This new release introduces:
-  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
-  * A new trace level named `Trace` whose level is below `Debug`
-  * A configurable exit function to be called upon a Fatal trace
-  * The `Level` object now implements the `encoding.TextUnmarshaler` interface
-
-# 1.1.1
-This is a bug fix release.
-  * fix the build break on Solaris
-  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
-
-# 1.1.0
-This new release introduces:
-  * several fixes:
-    * a fix for a race condition on entry formatting
-    * proper cleanup of previously used entries before putting them back in the pool
-    * the extra new line at the end of message in text formatter has been removed
-  * a new global public API to check if a level is activated: IsLevelEnabled
-  * the following methods have been added to the Logger object
-    * IsLevelEnabled
-    * SetFormatter
-    * SetOutput
-    * ReplaceHooks
-  * introduction of go module
-  * an indent configuration for the json formatter
-  * output colour support for windows
-  * the field sort function is now configurable for the text formatter
-  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
-
-# 1.0.6
-
-This new release introduces:
-  * a new api WithTime which allows one to easily force the time of the log entry,
-    which is mostly useful for logger wrappers
-  * a fix reverting the immutability of the entry given as parameter to the hooks
-  * a new configuration field of the json formatter in order to put all the fields
-    in a nested dictionary
-  * a new SetOutput method in the Logger
-  * a new configuration of the textformatter to configure the name of the default keys
-  * a new configuration of the text formatter to disable the level truncation
-
-# 1.0.5
-
-* Fix hooks race (#707)
-* Fix panic deadlock (#695)
-
-# 1.0.4
-
-* Fix race when adding hooks (#612)
-* Fix terminal check in AppEngine (#635)
-
-# 1.0.3
-
-* Replace example files with testable examples
-
-# 1.0.2
-
-* bug: quote non-string values in text formatter (#583)
-* Make (*Logger) SetLevel a public method
-
-# 1.0.1
-
-* bug: fix escaping in text formatter (#575)
-
-# 1.0.0
-
-* Officially changed name to lower-case
-* bug: colors on Windows 10 (#541)
-* bug: fix race in accessing level (#512)
-
-# 0.11.5
-
-* feature: add writer and writerlevel to entry (#372)
-
-# 0.11.4
-
-* bug: fix undefined variable on solaris (#493)
-
-# 0.11.3
-
-* formatter: configure quoting of empty values (#484)
-* formatter: configure quoting character (default is `"`) (#484)
-* bug: fix not importing io correctly in non-linux environments (#481)
-
-# 0.11.2
-
-* bug: fix windows terminal detection (#476)
-
-# 0.11.1
-
-* bug: fix tty detection with custom out (#471)
-
-# 0.11.0
-
-* 
performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb4..0000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index b042c89..0000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,513 +0,0 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Logrus is in maintenance-mode.** We will not be introducing new features. It's -simply too hard to do in a way that won't break many people's projects, which is -the last thing you want from your Logging library (again...). - -This does not mean Logrus is dead. Logrus will continue to be maintained for -security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). - -I believe Logrus' biggest contribution is to have played a part in today's -widespread use of structured logging in Golang. There doesn't seem to be a -reason to do a major, breaking iteration into Logrus V2, since the fantastic Go -community has built those independently. Many fantastic alternatives have sprung -up. Logrus would look like those, had it been re-designed with what we know -about structured logging in Go today. Check out, for example, -[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. - -[zerolog]: https://github.com/rs/zerolog -[zap]: https://github.com/uber-go/zap -[apex]: https://github.com/apex/log - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). 
- -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. 
You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. 
Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation set the `DisableLevelTruncation` field to `true`. - * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. -* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. 
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
-    // create a stdlib log.Logger that writes to
-    // logrus.Logger.
-    ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-This means that we can override the standard library logger easily:
-
-```go
-logger := logrus.New()
-logger.Formatter = &logrus.JSONFormatter{}
-
-// Use logrus for standard log output.
-// Note that `log` here references stdlib's log,
-// not logrus imported under the name `log`.
-log.SetOutput(logger.Writer())
-```
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hooks and formatter from a config file, and loggers will be generated with different configs for different environments.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-import (
-  "github.com/sirupsen/logrus"
-  "github.com/sirupsen/logrus/hooks/test"
-  "github.com/stretchr/testify/assert"
-  "testing"
-)
-
-func TestSomething(t *testing.T) {
-  logger, hook := test.NewNullLogger()
-  logger.Error("Hello error")
-
-  assert.Equal(t, 1, len(hook.Entries))
-  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
-  assert.Equal(t, "Hello error", hook.LastEntry().Message)
-
-  hook.Reset()
-  assert.Nil(t, hook.LastEntry())
-}
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
-to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
-
-```go
-...
-handler := func() {
-  // gracefully shutdown something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
-#### Thread safety
-
-By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situations where locking is not needed include:
-
-* You have no hooks registered, or calling hooks is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
-  1) logger.Out is protected by locks.
-
-  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
-
-     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
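A minimal sketch of that lock-free setup (assuming no hooks are registered and an append-only file as output; the 4k atomic-append caveat above still applies):

```go
package main

import (
  "os"

  "github.com/sirupsen/logrus"
)

func main() {
  // O_APPEND writes smaller than 4k are atomic on Linux (see the link above),
  // so concurrent writers don't interleave within a single entry.
  f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
  if err != nil {
    panic(err)
  }
  defer f.Close()

  logger := logrus.New()
  logger.SetOutput(f)
  logger.SetNoLock() // safe here only because no hooks are registered

  logger.Info("lock-free logging to an append-only file")
}
```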
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
deleted file mode 100644
index 8fd189e..0000000
--- a/vendor/github.com/sirupsen/logrus/alt_exit.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package logrus
-
-// The following code was sourced and modified from the
-// https://github.com/tebeka/atexit package governed by the following license:
-//
-// Copyright (c) 2012 Miki Tebeka .
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-import (
-	"fmt"
-	"os"
-)
-
-var handlers = []func(){}
-
-func runHandler(handler func()) {
-	defer func() {
-		if err := recover(); err != nil {
-			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
-		}
-	}()
-
-	handler()
-}
-
-func runHandlers() {
-	for _, handler := range handlers {
-		runHandler(handler)
-	}
-}
-
-// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
-func Exit(code int) {
-	runHandlers()
-	os.Exit(code)
-}
-
-// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to shut down gracefully. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func RegisterExitHandler(handler func()) {
-	handlers = append(handlers, handler)
-}
-
-// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to shut down gracefully. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func DeferExitHandler(handler func()) {
-	handlers = append([]func(){handler}, handlers...)
-}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
deleted file mode 100644
index df9d65c..0000000
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
-  GOPATH: c:\gopath
-branches:
-  only:
-    - master
-install:
-  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
-  - go version
-build_script:
-  - go get -t
-  - go test
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
deleted file mode 100644
index c7787f7..0000000
--- a/vendor/github.com/sirupsen/logrus/buffer_pool.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"sync"
-)
-
-var (
-	bufferPool BufferPool
-)
-
-type BufferPool interface {
-	Put(*bytes.Buffer)
-	Get() *bytes.Buffer
-}
-
-type defaultPool struct {
-	pool *sync.Pool
-}
-
-func (p *defaultPool) Put(buf *bytes.Buffer) {
-	p.pool.Put(buf)
-}
-
-func (p *defaultPool) Get() *bytes.Buffer {
-	return p.pool.Get().(*bytes.Buffer)
-}
-
-// SetBufferPool allows replacing the default logrus buffer pool
-// to better meet the specific needs of an application.
-func SetBufferPool(bp BufferPool) {
-	bufferPool = bp
-}
-
-func init() {
-	SetBufferPool(&defaultPool{
-		pool: &sync.Pool{
-			New: func() interface{} {
-				return new(bytes.Buffer)
-			},
-		},
-	})
-}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
deleted file mode 100644
index da67aba..0000000
--- a/vendor/github.com/sirupsen/logrus/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
-
-
-The simplest way to use Logrus is the package-level exported logger:
-
-  package main
-
-  import (
-    log "github.com/sirupsen/logrus"
-  )
-
-  func main() {
-    log.WithFields(log.Fields{
-      "animal": "walrus",
-      "number": 1,
-      "size":   10,
-    }).Info("A walrus appears")
-  }
-
-Output:
-  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-
-For a full guide visit https://github.com/sirupsen/logrus
-*/
-package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
deleted file mode 100644
index 71cdbbc..0000000
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ /dev/null
@@ -1,442 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"os"
-	"reflect"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-)
-
-var (
-
-	// qualified package name, cached at first use
-	logrusPackage string
-
-	// Positions in the call stack when tracing to report the calling method
-	minimumCallerDepth int
-
-	// Used for caller information initialisation
-	callerInitOnce sync.Once
-)
-
-const (
-	maximumCallerDepth int = 25
-	knownLogrusFrames  int = 4
-)
-
-func init() {
-	// start at the bottom of the stack before the package-name cache is primed
-	minimumCallerDepth = 1
-}
-
-// Defines the key when adding errors using WithError.
-var ErrorKey = "error"
-
-// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
-// Info, Warn, Error, Fatal or Panic is called on it.
These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. - Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -func (entry *Entry) Dup() *Entry { - data := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} -} - -// Returns the bytes representation of this entry from the formatter. -func (entry *Entry) Bytes() ([]byte, error) { - return entry.Logger.Formatter.Format(entry) -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Bytes() - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. -func (entry *Entry) WithContext(ctx context.Context) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch { - case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: - isErrField = true - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. 
-func (entry *Entry) WithTime(t time.Time) *Entry {
-	dataCopy := make(Fields, len(entry.Data))
-	for k, v := range entry.Data {
-		dataCopy[k] = v
-	}
-	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
-}
-
-// getPackageName reduces a fully qualified function name to the package name.
-// There really ought to be a better way...
-func getPackageName(f string) string {
-	for {
-		lastPeriod := strings.LastIndex(f, ".")
-		lastSlash := strings.LastIndex(f, "/")
-		if lastPeriod > lastSlash {
-			f = f[:lastPeriod]
-		} else {
-			break
-		}
-	}
-
-	return f
-}
-
-// getCaller retrieves the name of the first non-logrus calling function
-func getCaller() *runtime.Frame {
-	// cache this package's fully-qualified name
-	callerInitOnce.Do(func() {
-		pcs := make([]uintptr, maximumCallerDepth)
-		_ = runtime.Callers(0, pcs)
-
-		// dynamically get the package name and the minimum caller depth
-		for i := 0; i < maximumCallerDepth; i++ {
-			funcName := runtime.FuncForPC(pcs[i]).Name()
-			if strings.Contains(funcName, "getCaller") {
-				logrusPackage = getPackageName(funcName)
-				break
-			}
-		}
-
-		minimumCallerDepth = knownLogrusFrames
-	})
-
-	// Restrict the lookback frames to avoid runaway lookups
-	pcs := make([]uintptr, maximumCallerDepth)
-	depth := runtime.Callers(minimumCallerDepth, pcs)
-	frames := runtime.CallersFrames(pcs[:depth])
-
-	for f, again := frames.Next(); again; f, again = frames.Next() {
-		pkg := getPackageName(f.Function)
-
-		// If the caller isn't part of this package, we're done
-		if pkg != logrusPackage {
-			return &f //nolint:scopelint
-		}
-	}
-
-	// if we got here, we failed to find the caller's context
-	return nil
-}
-
-func (entry Entry) HasCaller() (has bool) {
-	return entry.Logger != nil &&
-		entry.Logger.ReportCaller &&
-		entry.Caller != nil
-}
-
-func (entry *Entry) log(level Level, msg string) {
-	var buffer *bytes.Buffer
-
-	newEntry := entry.Dup()
-
-	if newEntry.Time.IsZero() {
-		newEntry.Time = time.Now()
-	}
-
-	newEntry.Level = level
-	newEntry.Message = msg
-
-	newEntry.Logger.mu.Lock()
-	reportCaller := newEntry.Logger.ReportCaller
-	bufPool := newEntry.getBufferPool()
-	newEntry.Logger.mu.Unlock()
-
-	if reportCaller {
-		newEntry.Caller = getCaller()
-	}
-
-	newEntry.fireHooks()
-	buffer = bufPool.Get()
-	defer func() {
-		newEntry.Buffer = nil
-		buffer.Reset()
-		bufPool.Put(buffer)
-	}()
-	buffer.Reset()
-	newEntry.Buffer = buffer
-
-	newEntry.write()
-
-	newEntry.Buffer = nil
-
-	// To avoid Entry#log() returning a value that only would make sense for
-	// panic() to use in Entry#Panic(), we avoid the allocation by checking
-	// directly here.
- if level <= PanicLevel { - panic(newEntry) - } -} - -func (entry *Entry) getBufferPool() (pool BufferPool) { - if entry.Logger.BufferPool != nil { - return entry.Logger.BufferPool - } - return bufferPool -} - -func (entry *Entry) fireHooks() { - var tmpHooks LevelHooks - entry.Logger.mu.Lock() - tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) - for k, v := range entry.Logger.Hooks { - tmpHooks[k] = v - } - entry.Logger.mu.Unlock() - - err := tmpHooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - return - } - if _, err := entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Entry.Panic or Entry.Fatal should be used instead. -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) -} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) 
-}
-
-// Entry Println family functions
-
-func (entry *Entry) Logln(level Level, args ...interface{}) {
-	if entry.Logger.IsLevelEnabled(level) {
-		entry.Log(level, entry.sprintlnn(args...))
-	}
-}
-
-func (entry *Entry) Traceln(args ...interface{}) {
-	entry.Logln(TraceLevel, args...)
-}
-
-func (entry *Entry) Debugln(args ...interface{}) {
-	entry.Logln(DebugLevel, args...)
-}
-
-func (entry *Entry) Infoln(args ...interface{}) {
-	entry.Logln(InfoLevel, args...)
-}
-
-func (entry *Entry) Println(args ...interface{}) {
-	entry.Infoln(args...)
-}
-
-func (entry *Entry) Warnln(args ...interface{}) {
-	entry.Logln(WarnLevel, args...)
-}
-
-func (entry *Entry) Warningln(args ...interface{}) {
-	entry.Warnln(args...)
-}
-
-func (entry *Entry) Errorln(args ...interface{}) {
-	entry.Logln(ErrorLevel, args...)
-}
-
-func (entry *Entry) Fatalln(args ...interface{}) {
-	entry.Logln(FatalLevel, args...)
-	entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panicln(args ...interface{}) {
-	entry.Logln(PanicLevel, args...)
-}
-
-// sprintlnn => Sprint no newline. This is to get the behavior of
-// fmt.Sprintln, where spaces are always added between operands, regardless of
-// their type. Instead of vendoring the Sprintln implementation to spare a
-// string allocation, we do the simplest thing.
-func (entry *Entry) sprintlnn(args ...interface{}) string {
-	msg := fmt.Sprintln(args...)
-	return msg[:len(msg)-1]
-}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
deleted file mode 100644
index 017c30c..0000000
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package logrus
-
-import (
-	"context"
-	"io"
-	"time"
-)
-
-var (
-	// std is the name of the standard logger in stdlib `log`
-	std = New()
-)
-
-func StandardLogger() *Logger {
-	return std
-}
-
-// SetOutput sets the standard logger output.
-func SetOutput(out io.Writer) {
-	std.SetOutput(out)
-}
-
-// SetFormatter sets the standard logger formatter.
-func SetFormatter(formatter Formatter) {
-	std.SetFormatter(formatter)
-}
-
-// SetReportCaller sets whether the standard logger will include the calling
-// method as a field.
-func SetReportCaller(include bool) {
-	std.SetReportCaller(include)
-}
-
-// SetLevel sets the standard logger level.
-func SetLevel(level Level) {
-	std.SetLevel(level)
-}
-
-// GetLevel returns the standard logger level.
-func GetLevel() Level {
-	return std.GetLevel()
-}
-
-// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
-func IsLevelEnabled(level Level) bool {
-	return std.IsLevelEnabled(level)
-}
-
-// AddHook adds a hook to the standard logger hooks.
-func AddHook(hook Hook) {
-	std.AddHook(hook)
-}
-
-// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
-func WithError(err error) *Entry {
-	return std.WithField(ErrorKey, err)
-}
-
-// WithContext creates an entry from the standard logger and adds a context to it.
-func WithContext(ctx context.Context) *Entry {
-	return std.WithContext(ctx)
-}
-
-// WithField creates an entry from the standard logger and adds a field to
-// it. If you want multiple fields, use `WithFields`.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creates an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// TraceFn logs a message from a func at level Trace on the standard logger. -func TraceFn(fn LogFunction) { - std.TraceFn(fn) -} - -// DebugFn logs a message from a func at level Debug on the standard logger. -func DebugFn(fn LogFunction) { - std.DebugFn(fn) -} - -// PrintFn logs a message from a func at level Info on the standard logger. -func PrintFn(fn LogFunction) { - std.PrintFn(fn) -} - -// InfoFn logs a message from a func at level Info on the standard logger. -func InfoFn(fn LogFunction) { - std.InfoFn(fn) -} - -// WarnFn logs a message from a func at level Warn on the standard logger. -func WarnFn(fn LogFunction) { - std.WarnFn(fn) -} - -// WarningFn logs a message from a func at level Warn on the standard logger. -func WarningFn(fn LogFunction) { - std.WarningFn(fn) -} - -// ErrorFn logs a message from a func at level Error on the standard logger. -func ErrorFn(fn LogFunction) { - std.ErrorFn(fn) -} - -// PanicFn logs a message from a func at level Panic on the standard logger. -func PanicFn(fn LogFunction) { - std.PanicFn(fn) -} - -// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. -func FatalFn(fn LogFunction) { - std.FatalFn(fn) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. 
-func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. -func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 4088837..0000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. 
If this code wasn't there, doing:
-//
-// logrus.WithField("level", 1).Info("hello")
-//
-// would just silently drop the user-provided level. Instead, with this code
-// it'll be logged as:
-//
-// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
-//
-// It's not exported because it's still using Data in an opinionated way. It's to
-// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
-	timeKey := fieldMap.resolve(FieldKeyTime)
-	if t, ok := data[timeKey]; ok {
-		data["fields."+timeKey] = t
-		delete(data, timeKey)
-	}
-
-	msgKey := fieldMap.resolve(FieldKeyMsg)
-	if m, ok := data[msgKey]; ok {
-		data["fields."+msgKey] = m
-		delete(data, msgKey)
-	}
-
-	levelKey := fieldMap.resolve(FieldKeyLevel)
-	if l, ok := data[levelKey]; ok {
-		data["fields."+levelKey] = l
-		delete(data, levelKey)
-	}
-
-	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
-	if l, ok := data[logrusErrKey]; ok {
-		data["fields."+logrusErrKey] = l
-		delete(data, logrusErrKey)
-	}
-
-	// If reportCaller is not set, 'func' will not conflict.
-	if reportCaller {
-		funcKey := fieldMap.resolve(FieldKeyFunc)
-		if l, ok := data[funcKey]; ok {
-			data["fields."+funcKey] = l
-		}
-		fileKey := fieldMap.resolve(FieldKeyFile)
-		if l, ok := data[fileKey]; ok {
-			data["fields."+fileKey] = l
-		}
-	}
-}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
deleted file mode 100644
index 3f151cd..0000000
--- a/vendor/github.com/sirupsen/logrus/hooks.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package logrus
-
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
-// fired in a goroutine or a channel with workers; you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
-// the logging calls for levels returned from `Levels()` to block.
-type Hook interface {
-	Levels() []Level
-	Fire(*Entry) error
-}
-
-// Internal type for storing the hooks on a logger instance.
-type LevelHooks map[Level][]Hook
-
-// Add a hook to an instance of logger. This is called with
-// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks LevelHooks) Add(hook Hook) {
-	for _, level := range hook.Levels() {
-		hooks[level] = append(hooks[level], hook)
-	}
-}
-
-// Fire all the hooks for the passed level. Used by `entry.log` to fire
-// appropriate hooks for a log entry.
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
-	for _, hook := range hooks[level] {
-		if err := hook.Fire(entry); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
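For orientation, a minimal sketch of a custom implementation of this `Hook` interface, wired up with `AddHook` (the `StderrHook` type is hypothetical, not part of the vendored sources):

```go
package main

import (
  "fmt"
  "os"

  "github.com/sirupsen/logrus"
)

// StderrHook mirrors high-severity entries to stderr.
type StderrHook struct{}

// Levels reports which levels this hook fires on.
func (h *StderrHook) Levels() []logrus.Level {
  return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire is called by logrus with the entry being logged.
func (h *StderrHook) Fire(entry *logrus.Entry) error {
  line, err := entry.String() // formatted via the logger's formatter
  if err != nil {
    return err
  }
  _, err = fmt.Fprint(os.Stderr, line)
  return err
}

func main() {
  logger := logrus.New()
  logger.AddHook(&StderrHook{})
  logger.Error("this entry is also mirrored to stderr")
}
```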
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
deleted file mode 100644
index c96dc56..0000000
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"runtime"
-)
-
-type fieldKey string
-
-// FieldMap allows customization of the key names for default fields.
-type FieldMap map[fieldKey]string
-
-func (f FieldMap) resolve(key fieldKey) string {
-	if k, ok := f[key]; ok {
-		return k
-	}
-
-	return string(key)
-}
-
-// JSONFormatter formats logs into parsable json
-type JSONFormatter struct {
-	// TimestampFormat sets the format used for marshaling timestamps.
-	// The format to use is the same as for time.Format or time.Parse from the standard
-	// library.
-	// The standard library already provides a set of predefined formats.
-	TimestampFormat string
-
-	// DisableTimestamp allows disabling automatic timestamps in output.
-	DisableTimestamp bool
-
-	// DisableHTMLEscape allows disabling HTML escaping in output.
-	DisableHTMLEscape bool
-
-	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
-	DataKey string
-
-	// FieldMap allows users to customize the names of keys for default fields.
-	// As an example:
-	// formatter := &JSONFormatter{
-	//   	FieldMap: FieldMap{
-	// 		 FieldKeyTime:  "@timestamp",
-	// 		 FieldKeyLevel: "@level",
-	// 		 FieldKeyMsg:   "@message",
-	// 		 FieldKeyFunc:  "@caller",
-	//    },
-	// }
-	FieldMap FieldMap
-
-	// CallerPrettyfier can be set by the user to modify the content
-	// of the function and file keys in the json data when ReportCaller is
-	// activated. If any of the returned values is the empty string, the
-	// corresponding key will be removed from the json fields.
-	CallerPrettyfier func(*runtime.Frame) (function string, file string)
-
-	// PrettyPrint will indent all json logs
-	PrettyPrint bool
-}
-
-// Format renders a single log entry
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
-	data := make(Fields, len(entry.Data)+4)
-	for k, v := range entry.Data {
-		switch v := v.(type) {
-		case error:
-			// Otherwise errors are ignored by `encoding/json`
-			// https://github.com/sirupsen/logrus/issues/137
-			data[k] = v.Error()
-		default:
-			data[k] = v
-		}
-	}
-
-	if f.DataKey != "" {
-		newData := make(Fields, 4)
-		newData[f.DataKey] = data
-		data = newData
-	}
-
-	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
-
-	timestampFormat := f.TimestampFormat
-	if timestampFormat == "" {
-		timestampFormat = defaultTimestampFormat
-	}
-
-	if entry.err != "" {
-		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
-	}
-	if !f.DisableTimestamp {
-		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
-	}
-	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
-	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
-	if entry.HasCaller() {
-		funcVal := entry.Caller.Function
-		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
-		if f.CallerPrettyfier != nil {
-			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
-		}
-		if funcVal != "" {
-			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
-		}
-		if fileVal != "" {
-			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
-		}
-	}
-
-	var b *bytes.Buffer
-	if entry.Buffer != nil {
-		b = entry.Buffer
-	} else {
-		b = &bytes.Buffer{}
-	}
-
-	encoder := json.NewEncoder(b)
-	encoder.SetEscapeHTML(!f.DisableHTMLEscape)
-	if f.PrettyPrint {
-		encoder.SetIndent("", "  ")
-	}
-	if err := encoder.Encode(data); err != nil {
-		return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
-	}
-
-	return b.Bytes(), nil
-}
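A minimal configuration sketch for the options documented above (the `@`-prefixed key names are illustrative):

```go
package main

import (
  "time"

  "github.com/sirupsen/logrus"
)

func main() {
  logger := logrus.New()
  logger.SetFormatter(&logrus.JSONFormatter{
    TimestampFormat: time.RFC3339Nano, // one of the predefined formats
    PrettyPrint:     true,             // indent the JSON output
    FieldMap: logrus.FieldMap{
      logrus.FieldKeyTime:  "@timestamp",
      logrus.FieldKeyLevel: "@level",
      logrus.FieldKeyMsg:   "@message",
    },
  })

  logger.WithField("user_id", 42).Info("login")
}
```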
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
deleted file mode 100644
index 5ff0aef..0000000
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package logrus
-
-import (
-	"context"
-	"io"
-	"os"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// LogFunction For big messages, it can be more efficient to pass a function
-// and only call it if the log level is actually enabled, rather than
-// generating the log message and then checking if the level is enabled.
-type LogFunction func() []interface{}
-
-type Logger struct {
-	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
-	// file, or leave it default which is `os.Stderr`. You can also set this to
-	// something more adventurous, such as logging to Kafka.
-	Out io.Writer
-	// Hooks for the logger instance. These allow firing events based on logging
-	// levels and log entries. For example, to send errors to an error tracking
-	// service, log to StatsD or dump the core on fatal errors.
-	Hooks LevelHooks
-	// All log entries pass through the formatter before logged to Out. The
-	// included formatters are `TextFormatter` and `JSONFormatter` for which
-	// TextFormatter is the default. In development (when a TTY is attached) it
-	// logs with colors, but to a file it wouldn't. You can easily implement your
-	// own that implements the `Formatter` interface, see the `README` or included
-	// formatters for examples.
-	Formatter Formatter
-
-	// Flag for whether to log caller info (off by default)
-	ReportCaller bool
-
-	// The logging level the logger should log at. This is typically (and defaults
-	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
-	// logged.
-	Level Level
-	// Used to sync writing to the log. Locking is enabled by default.
-	mu MutexWrap
-	// Reusable empty entry
-	entryPool sync.Pool
-	// Function to exit the application, defaults to `os.Exit()`
-	ExitFunc exitFunc
-	// The buffer pool used to format the log. If it is nil, the default global
-	// buffer pool will be used.
-	BufferPool BufferPool
-}
-
-type exitFunc func(int)
-
-type MutexWrap struct {
-	lock     sync.Mutex
-	disabled bool
-}
-
-func (mw *MutexWrap) Lock() {
-	if !mw.disabled {
-		mw.lock.Lock()
-	}
-}
-
-func (mw *MutexWrap) Unlock() {
-	if !mw.disabled {
-		mw.lock.Unlock()
-	}
-}
-
-func (mw *MutexWrap) Disable() {
-	mw.disabled = true
-}
-
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
-// instantiate your own:
-//
-//    var log = &logrus.Logger{
-//      Out: os.Stderr,
-//      Formatter: new(logrus.TextFormatter),
-//      Hooks: make(logrus.LevelHooks),
-//      Level: logrus.DebugLevel,
-//    }
-//
-// It's recommended to make this a global instance called `log`.
-func New() *Logger {
-	return &Logger{
-		Out:          os.Stderr,
-		Formatter:    new(TextFormatter),
-		Hooks:        make(LevelHooks),
-		Level:        InfoLevel,
-		ExitFunc:     os.Exit,
-		ReportCaller: false,
-	}
-}
-
-func (logger *Logger) newEntry() *Entry {
-	entry, ok := logger.entryPool.Get().(*Entry)
-	if ok {
-		return entry
-	}
-	return NewEntry(logger)
-}
-
-func (logger *Logger) releaseEntry(entry *Entry) {
-	entry.Data = map[string]interface{}{}
-	logger.entryPool.Put(entry)
-}
-
-// WithField allocates a new entry and adds a field to it.
-// Debug, Print, Info, Warn, Error, Fatal or Panic must then be applied to
-// this new returned entry.
-// If you want multiple fields, use `WithFields`.
-func (logger *Logger) WithField(key string, value interface{}) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithField(key, value)
-}
-
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
-	entry := logger.newEntry()
-	defer logger.releaseEntry(entry)
-	return entry.WithFields(fields)
-}
-
-// Add an error as a single field to the log entry. All it does is call
-// `WithError` for the given `error`.
-func (logger *Logger) WithError(err error) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithError(err) -} - -// Add a context to the log entry. -func (logger *Logger) WithContext(ctx context.Context) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithContext(ctx) -} - -// Overrides the time of the log entry. -func (logger *Logger) WithTime(t time.Time) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithTime(t) -} - -func (logger *Logger) Logf(level Level, format string, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logf(level, format, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Tracef(format string, args ...interface{}) { - logger.Logf(TraceLevel, format, args...) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - logger.Logf(DebugLevel, format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - logger.Logf(InfoLevel, format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - entry := logger.newEntry() - entry.Printf(format, args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - logger.Logf(WarnLevel, format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - logger.Logf(ErrorLevel, format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(FatalLevel, format, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - logger.Logf(PanicLevel, format, args...) -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Logger.Panic or Logger.Fatal should be used instead. -func (logger *Logger) Log(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) LogFn(level Level, fn LogFunction) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, fn()...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Trace(args ...interface{}) { - logger.Log(TraceLevel, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - logger.Log(DebugLevel, args...) -} - -func (logger *Logger) Info(args ...interface{}) { - logger.Log(InfoLevel, args...) -} - -func (logger *Logger) Print(args ...interface{}) { - entry := logger.newEntry() - entry.Print(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warn(args ...interface{}) { - logger.Log(WarnLevel, args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - logger.Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - logger.Log(ErrorLevel, args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - logger.Log(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - logger.Log(PanicLevel, args...) 
-} - -func (logger *Logger) TraceFn(fn LogFunction) { - logger.LogFn(TraceLevel, fn) -} - -func (logger *Logger) DebugFn(fn LogFunction) { - logger.LogFn(DebugLevel, fn) -} - -func (logger *Logger) InfoFn(fn LogFunction) { - logger.LogFn(InfoLevel, fn) -} - -func (logger *Logger) PrintFn(fn LogFunction) { - entry := logger.newEntry() - entry.Print(fn()...) - logger.releaseEntry(entry) -} - -func (logger *Logger) WarnFn(fn LogFunction) { - logger.LogFn(WarnLevel, fn) -} - -func (logger *Logger) WarningFn(fn LogFunction) { - logger.WarnFn(fn) -} - -func (logger *Logger) ErrorFn(fn LogFunction) { - logger.LogFn(ErrorLevel, fn) -} - -func (logger *Logger) FatalFn(fn LogFunction) { - logger.LogFn(FatalLevel, fn) - logger.Exit(1) -} - -func (logger *Logger) PanicFn(fn LogFunction) { - logger.LogFn(PanicLevel, fn) -} - -func (logger *Logger) Logln(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logln(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Traceln(args ...interface{}) { - logger.Logln(TraceLevel, args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - logger.Logln(DebugLevel, args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - logger.Logln(InfoLevel, args...) -} - -func (logger *Logger) Println(args ...interface{}) { - entry := logger.newEntry() - entry.Println(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnln(args ...interface{}) { - logger.Logln(WarnLevel, args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - logger.Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - logger.Logln(ErrorLevel, args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - logger.Logln(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - logger.Logln(PanicLevel, args...) -} - -func (logger *Logger) Exit(code int) { - runHandlers() - if logger.ExitFunc == nil { - logger.ExitFunc = os.Exit - } - logger.ExitFunc(code) -} - -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. -func (logger *Logger) SetNoLock() { - logger.mu.Disable() -} - -func (logger *Logger) level() Level { - return Level(atomic.LoadUint32((*uint32)(&logger.Level))) -} - -// SetLevel sets the logger level. -func (logger *Logger) SetLevel(level Level) { - atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) -} - -// GetLevel returns the logger level. -func (logger *Logger) GetLevel() Level { - return logger.level() -} - -// AddHook adds a hook to the logger hooks. -func (logger *Logger) AddHook(hook Hook) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Hooks.Add(hook) -} - -// IsLevelEnabled checks if the log level of the logger is greater than the level param -func (logger *Logger) IsLevelEnabled(level Level) bool { - return logger.level() >= level -} - -// SetFormatter sets the logger formatter. -func (logger *Logger) SetFormatter(formatter Formatter) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Formatter = formatter -} - -// SetOutput sets the logger output. 
-func (logger *Logger) SetOutput(output io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = output -} - -func (logger *Logger) SetReportCaller(reportCaller bool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.ReportCaller = reportCaller -} - -// ReplaceHooks replaces the logger hooks and returns the old ones -func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { - logger.mu.Lock() - oldHooks := logger.Hooks - logger.Hooks = hooks - logger.mu.Unlock() - return oldHooks -} - -// SetBufferPool sets the logger buffer pool. -func (logger *Logger) SetBufferPool(pool BufferPool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.BufferPool = pool -} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go deleted file mode 100644 index 2f16224..0000000 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ /dev/null @@ -1,186 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint32 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - if b, err := level.MarshalText(); err == nil { - return string(b) - } else { - return "unknown" - } -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - case "trace": - return TraceLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (level *Level) UnmarshalText(text []byte) error { - l, err := ParseLevel(string(text)) - if err != nil { - return err - } - - *level = l - - return nil -} - -func (level Level) MarshalText() ([]byte, error) { - switch level { - case TraceLevel: - return []byte("trace"), nil - case DebugLevel: - return []byte("debug"), nil - case InfoLevel: - return []byte("info"), nil - case WarnLevel: - return []byte("warning"), nil - case ErrorLevel: - return []byte("error"), nil - case FatalLevel: - return []byte("fatal"), nil - case PanicLevel: - return []byte("panic"), nil - } - - return nil, fmt.Errorf("not a valid logrus level %d", level) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, - TraceLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. 
Usually only enabled when debugging. Very verbose logging.
-	DebugLevel
-	// TraceLevel level. Designates finer-grained informational events than Debug.
-	TraceLevel
-)
-
-// Won't compile if StdLogger can't be realized by a log.Logger
-var (
-	_ StdLogger = &log.Logger{}
-	_ StdLogger = &Entry{}
-	_ StdLogger = &Logger{}
-)
-
-// StdLogger is what your logrus-enabled library should take; that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface; this is the closest we get, unfortunately.
-type StdLogger interface {
-	Print(...interface{})
-	Printf(string, ...interface{})
-	Println(...interface{})
-
-	Fatal(...interface{})
-	Fatalf(string, ...interface{})
-	Fatalln(...interface{})
-
-	Panic(...interface{})
-	Panicf(string, ...interface{})
-	Panicln(...interface{})
-}
-
-// The FieldLogger interface generalizes the Entry and Logger types
-type FieldLogger interface {
-	WithField(key string, value interface{}) *Entry
-	WithFields(fields Fields) *Entry
-	WithError(err error) *Entry
-
-	Debugf(format string, args ...interface{})
-	Infof(format string, args ...interface{})
-	Printf(format string, args ...interface{})
-	Warnf(format string, args ...interface{})
-	Warningf(format string, args ...interface{})
-	Errorf(format string, args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Panicf(format string, args ...interface{})
-
-	Debug(args ...interface{})
-	Info(args ...interface{})
-	Print(args ...interface{})
-	Warn(args ...interface{})
-	Warning(args ...interface{})
-	Error(args ...interface{})
-	Fatal(args ...interface{})
-	Panic(args ...interface{})
-
-	Debugln(args ...interface{})
-	Infoln(args ...interface{})
-	Println(args ...interface{})
-	Warnln(args ...interface{})
-	Warningln(args ...interface{})
-	Errorln(args ...interface{})
-	Fatalln(args ...interface{})
-	Panicln(args ...interface{})
-
-	// IsDebugEnabled() bool
-	// IsInfoEnabled() bool
-	// IsWarnEnabled() bool
-	// IsErrorEnabled() bool
-	// IsFatalEnabled() bool
-	// IsPanicEnabled() bool
-}
-
-// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
-// here for consistency. Do not use. Use Logger or Entry instead.
-type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de9..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 4997899..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go deleted file mode 100644 index ebdae3e..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build js - -package logrus - -func isTerminal(fd int) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3..0000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func isTerminal(fd int) bool {
-	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
-	return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
deleted file mode 100644
index 04748b8..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build linux aix zos
-// +build !js
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-
-func isTerminal(fd int) bool {
-	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
-	return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
deleted file mode 100644
index 2879eb5..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !appengine,!js,windows
-
-package logrus
-
-import (
-	"io"
-	"os"
-
-	"golang.org/x/sys/windows"
-)
-
-func checkIfTerminal(w io.Writer) bool {
-	switch v := w.(type) {
-	case *os.File:
-		handle := windows.Handle(v.Fd())
-		var mode uint32
-		if err := windows.GetConsoleMode(handle, &mode); err != nil {
-			return false
-		}
-		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
-		if err := windows.SetConsoleMode(handle, mode); err != nil {
-			return false
-		}
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
deleted file mode 100644
index be2c6ef..0000000
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-	"unicode/utf8"
-)
-
-const (
-	red    = 31
-	yellow = 33
-	blue   = 36
-	gray   = 37
-)
-
-var baseTimestamp time.Time
-
-func init() {
-	baseTimestamp = time.Now()
-}
-
-// TextFormatter formats logs into text
-type TextFormatter struct {
-	// Set to true to bypass checking for a TTY before outputting colors.
-	ForceColors bool
-
-	// Force disabling colors.
-	DisableColors bool
-
-	// Force quoting of all values.
-	ForceQuote bool
-
-	// DisableQuote disables quoting for all values.
-	// DisableQuote will have a lower priority than ForceQuote.
-	// If both of them are set to true, quotes will be forced on all values.
-	DisableQuote bool
-
-	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
-	EnvironmentOverrideColors bool
-
-	// Disable timestamp logging. Useful when output is redirected to a logging
-	// system that already adds timestamps.
-	DisableTimestamp bool
-
-	// Enable logging the full timestamp when a TTY is attached instead of just
-	// the time passed since beginning of execution.
-	FullTimestamp bool
-
-	// TimestampFormat to use for display when a full timestamp is printed.
-	// The format to use is the same as for time.Format or time.Parse from the standard
-	// library.
-	// The standard library already provides a set of predefined formats.
-	TimestampFormat string
-
-	// The fields are sorted by default for a consistent output. For applications
-	// that log extremely frequently and don't use the JSON formatter this may not
-	// be desired.
-	DisableSorting bool
-
-	// The keys sorting function; when uninitialized it uses sort.Strings.
-	SortingFunc func([]string)
-
-	// Disables the truncation of the level text to 4 characters.
- DisableLevelTruncation bool - - // PadLevelText Adds padding the level text so that all the levels output at the same length - // PadLevelText is a superset of the DisableLevelTruncation option - PadLevelText bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &TextFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message"}} - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - terminalInitOnce sync.Once - - // The max length of the level text, generated dynamically on init - levelTextMaxLength int -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } - // Get the max length of the level text - for _, level := range AllLevels { - levelTextLength := utf8.RuneCount([]byte(level.String())) - if levelTextLength > f.levelTextMaxLength { - f.levelTextMaxLength = levelTextLength - } - } -} - -func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) - - if f.EnvironmentOverrideColors { - switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { - case ok && force != "0": - isColored = true - case ok && force == "0", os.Getenv("CLICOLOR") == "0": - isColored = false - } - } - - return isColored && !f.DisableColors -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields) - for k, v := range entry.Data { - data[k] = v - } - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - var funcVal, fileVal string - - fixedKeys := make([]string, 0, 4+len(data)) - if !f.DisableTimestamp { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) - } - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) - if entry.Message != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) - } - if entry.err != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) - } - if entry.HasCaller() { - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } else { - funcVal = entry.Caller.Function - fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - } - - if funcVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) - } - if fileVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) - } - } - - if !f.DisableSorting { - if f.SortingFunc == nil { - sort.Strings(keys) - fixedKeys = append(fixedKeys, keys...) - } else { - if !f.isColored() { - fixedKeys = append(fixedKeys, keys...) - f.SortingFunc(fixedKeys) - } else { - f.SortingFunc(keys) - } - } - } else { - fixedKeys = append(fixedKeys, keys...) 
- } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - case InfoLevel: - levelColor = blue - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation && !f.PadLevelText { - levelText = levelText[0:4] - } - if f.PadLevelText { - // Generates the format string used in the next line, for example "%-6s" or "%-7s". - // Based on the max level text length. - formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" - // Formats the level text by appending spaces up to the max length, for example: - // - "INFO " - // - "WARNING" - levelText = fmt.Sprintf(formatString, levelText) - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - switch { - case f.DisableTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - case !f.FullTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - default: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.ForceQuote { - return true - } - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - if f.DisableQuote { - return false - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go deleted file mode 100644 index 72e8e3a..0000000 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ /dev/null @@ -1,70 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -// Writer at INFO level. See WriterLevel for details. -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -// WriterLevel returns an io.Writer that can be used to write arbitrary text to -// the logger at the given log level. Each line written to the writer will be -// printed in the usual way using formatters and hooks. The writer is part of an -// io.Pipe and it is the callers responsibility to close the writer when done. -// This can be used to override the standard library logger easily. -func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - return NewEntry(logger).WriterLevel(level) -} - -func (entry *Entry) Writer() *io.PipeWriter { - return entry.WriterLevel(InfoLevel) -} - -func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case TraceLevel: - printFunc = entry.Trace - case DebugLevel: - printFunc = entry.Debug - case InfoLevel: - printFunc = entry.Info - case WarnLevel: - printFunc = entry.Warn - case ErrorLevel: - printFunc = entry.Error - case FatalLevel: - printFunc = entry.Fatal - case PanicLevel: - printFunc = entry.Panic - default: - printFunc = entry.Print - } - - go entry.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - entry.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/xdg-go/pbkdf2/.gitignore b/vendor/github.com/xdg-go/pbkdf2/.gitignore deleted file mode 100644 index f1c181e..0000000 --- a/vendor/github.com/xdg-go/pbkdf2/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/xdg-go/pbkdf2/LICENSE b/vendor/github.com/xdg-go/pbkdf2/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/vendor/github.com/xdg-go/pbkdf2/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
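Taken together, the text_formatter.go options and the io.Pipe plumbing in writer.go above cover the two common integration points: configuring colored text output, and funnelling arbitrary io.Writer traffic into a leveled logger. A short usage sketch against the public logrus API (the import path follows from the vendor path being deleted):

    package main

    import (
        "log"

        "github.com/sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()
        logger.SetFormatter(&logrus.TextFormatter{
            FullTimestamp: true, // full timestamp, not seconds since start
            PadLevelText:  true, // pad levels to equal width instead of truncating to 4 chars
            FieldMap: logrus.FieldMap{
                logrus.FieldKeyMsg: "@message", // rename the default "msg" key
            },
        })

        // WriterLevel returns an *io.PipeWriter; every line written to it is
        // re-emitted through the formatter and hooks at WarnLevel. The caller
        // owns the writer and should close it when done.
        w := logger.WriterLevel(logrus.WarnLevel)
        defer w.Close()

        // Redirect the standard library logger through logrus, as the
        // writer.go doc comment describes.
        log.SetOutput(w)
        log.Print("this line arrives as a logrus warning")
    }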
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg-go/pbkdf2/README.md b/vendor/github.com/xdg-go/pbkdf2/README.md deleted file mode 100644 index d2824e4..0000000 --- a/vendor/github.com/xdg-go/pbkdf2/README.md +++ /dev/null @@ -1,17 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/pbkdf2.svg)](https://pkg.go.dev/github.com/xdg-go/pbkdf2) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/pbkdf2)](https://goreportcard.com/report/github.com/xdg-go/pbkdf2) -[![Github Actions](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml) - -# pbkdf2 – Go implementation of PBKDF2 - -## Description - -Package pbkdf2 provides password-based key derivation based on -[RFC 8018](https://tools.ietf.org/html/rfc8018). - -## Copyright and License - -Copyright 2021 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). 
You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go b/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go deleted file mode 100644 index 029945c..0000000 --- a/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2021 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package pbkdf2 implements password-based key derivation using the PBKDF2 -// algorithm described in RFC 2898 and RFC 8018. -// -// It provides a drop-in replacement for `golang.org/x/crypto/pbkdf2`, with -// the following benefits: -// -// - Released as a module with semantic versioning -// -// - Does not pull in dependencies for unrelated `x/crypto/*` packages -// -// - Supports Go 1.9+ -// -// See https://tools.ietf.org/html/rfc8018#section-4 for security considerations -// in the selection of a salt and iteration count. -package pbkdf2 - -import ( - "crypto/hmac" - "encoding/binary" - "hash" -) - -// Key generates a derived key from a password using the PBKDF2 algorithm. The -// inputs include salt bytes, the iteration count, desired key length, and a -// constructor for a hashing function. For example, for a 32-byte key using -// SHA-256: -// -// key := Key([]byte("trustNo1"), salt, 10000, 32, sha256.New) -func Key(password, salt []byte, iterCount, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hLen := prf.Size() - numBlocks := keyLen / hLen - // Get an extra block if keyLen is not an even number of hLen blocks. - if keyLen%hLen > 0 { - numBlocks++ - } - - Ti := make([]byte, hLen) - Uj := make([]byte, hLen) - dk := make([]byte, 0, hLen*numBlocks) - buf := make([]byte, 4) - - for i := uint32(1); i <= uint32(numBlocks); i++ { - // Initialize Uj for j == 1 from salt and block index. - // Initialize Ti = U1. - binary.BigEndian.PutUint32(buf, i) - prf.Reset() - prf.Write(salt) - prf.Write(buf) - Uj = Uj[:0] - Uj = prf.Sum(Uj) - - // Ti = U1 ^ U2 ^ ... ^ Ux - copy(Ti, Uj) - for j := 2; j <= iterCount; j++ { - prf.Reset() - prf.Write(Uj) - Uj = Uj[:0] - Uj = prf.Sum(Uj) - for k := range Uj { - Ti[k] ^= Uj[k] - } - } - - // DK = concat(T1, T2, ... Tn) - dk = append(dk, Ti...) - } - - return dk[0:keyLen] -} diff --git a/vendor/github.com/xdg-go/scram/.gitignore b/vendor/github.com/xdg-go/scram/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md deleted file mode 100644 index 21828db..0000000 --- a/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ /dev/null @@ -1,22 +0,0 @@ -# CHANGELOG - -## v1.1.1 - 2022-03-03 - -- Bump stringprep dependency to v1.0.3 for upstream CVE fix. - -## v1.1.0 - 2022-01-16 - -- Add SHA-512 hash generator function for convenience. - -## v1.0.2 - 2021-03-28 - -- Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to - minimize transitive dependencies and support Go 1.9+. - -## v1.0.1 - 2021-03-27 - -- Bump stringprep dependency to v1.0.2 for Go 1.11 support. 
- -## v1.0.0 - 2021-03-27 - -- First release as a Go module diff --git a/vendor/github.com/xdg-go/scram/LICENSE b/vendor/github.com/xdg-go/scram/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/vendor/github.com/xdg-go/scram/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
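Before the scram package that consumes it, a usage sketch for the pbkdf2.Key function removed above: password and hash constructor in, derived key out, with salt, iteration count and key length as explicit parameters. The 10000-iteration / 32-byte choice here is illustrative, echoing the package's own doc comment:

    package main

    import (
        "crypto/rand"
        "crypto/sha256"
        "encoding/hex"
        "fmt"

        "github.com/xdg-go/pbkdf2"
    )

    func main() {
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
            panic(err)
        }

        // 32-byte key, 10k iterations, HMAC-SHA-256 as the PRF — mirrors the
        // example in the package's doc comment.
        key := pbkdf2.Key([]byte("trustNo1"), salt, 10000, 32, sha256.New)
        fmt.Println(hex.EncodeToString(key))
    }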
diff --git a/vendor/github.com/xdg-go/scram/README.md b/vendor/github.com/xdg-go/scram/README.md deleted file mode 100644 index 3a46f5c..0000000 --- a/vendor/github.com/xdg-go/scram/README.md +++ /dev/null @@ -1,72 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/scram.svg)](https://pkg.go.dev/github.com/xdg-go/scram) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/scram)](https://goreportcard.com/report/github.com/xdg-go/scram) -[![Github Actions](https://github.com/xdg-go/scram/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/scram/actions/workflows/test.yml) - -# scram – Go implementation of RFC-5802 - -## Description - -Package scram provides client and server implementations of the Salted -Challenge Response Authentication Mechanism (SCRAM) described in -[RFC-5802](https://tools.ietf.org/html/rfc5802) and -[RFC-7677](https://tools.ietf.org/html/rfc7677). - -It includes both client and server side support. - -Channel binding and extensions are not (yet) supported. - -## Examples - -### Client side - - package main - - import "github.com/xdg-go/scram" - - func main() { - // Get Client with username, password and (optional) authorization ID. - clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") - if err != nil { - panic(err) - } - - // Prepare the authentication conversation. Use the empty string as the - // initial server message argument to start the conversation. - conv := clientSHA1.NewConversation() - var serverMsg string - - // Get the first message, send it and read the response. - firstMsg, err := conv.Step(serverMsg) - if err != nil { - panic(err) - } - serverMsg = sendClientMsg(firstMsg) - - // Get the second message, send it, and read the response. - secondMsg, err := conv.Step(serverMsg) - if err != nil { - panic(err) - } - serverMsg = sendClientMsg(secondMsg) - - // Validate the server's final message. We have no further message to - // send so ignore that return value. - _, err = conv.Step(serverMsg) - if err != nil { - panic(err) - } - - return - } - - func sendClientMsg(s string) string { - // A real implementation would send this to a server and read a reply. - return "" - } - -## Copyright and License - -Copyright 2018 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/scram/client.go b/vendor/github.com/xdg-go/scram/client.go deleted file mode 100644 index 5b53021..0000000 --- a/vendor/github.com/xdg-go/scram/client.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "sync" - - "github.com/xdg-go/pbkdf2" -) - -// Client implements the client side of SCRAM authentication. It holds -// configuration values needed to initialize new client-side conversations for -// a specific username, password and authorization ID tuple. Client caches -// the computationally-expensive parts of a SCRAM conversation as described in -// RFC-5802. If repeated authentication conversations may be required for a -// user (e.g. disconnect/reconnect), the user's Client should be preserved. 
-// -// For security reasons, Clients have a default minimum PBKDF2 iteration count -// of 4096. If a server requests a smaller iteration count, an authentication -// conversation will error. -// -// A Client can also be used by a server application to construct the hashed -// authentication values to be stored for a new user. See StoredCredentials() -// for more. -type Client struct { - sync.RWMutex - username string - password string - authzID string - minIters int - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - cache map[KeyFactors]derivedKeys -} - -func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client { - return &Client{ - username: username, - password: password, - authzID: authzID, - minIters: 4096, - nonceGen: defaultNonceGenerator, - hashGen: fcn, - cache: make(map[KeyFactors]derivedKeys), - } -} - -// WithMinIterations changes minimum required PBKDF2 iteration count. -func (c *Client) WithMinIterations(n int) *Client { - c.Lock() - defer c.Unlock() - c.minIters = n - return c -} - -// WithNonceGenerator replaces the default nonce generator (base64 encoding of -// 24 bytes from crypto/rand) with a custom generator. This is provided for -// testing or for users with custom nonce requirements. -func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client { - c.Lock() - defer c.Unlock() - c.nonceGen = ng - return c -} - -// NewConversation constructs a client-side authentication conversation. -// Conversations cannot be reused, so this must be called for each new -// authentication attempt. -func (c *Client) NewConversation() *ClientConversation { - c.RLock() - defer c.RUnlock() - return &ClientConversation{ - client: c, - nonceGen: c.nonceGen, - hashGen: c.hashGen, - minIters: c.minIters, - } -} - -func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys { - dk, ok := c.getCache(kf) - if !ok { - dk = c.computeKeys(kf) - c.setCache(kf, dk) - } - return dk -} - -// GetStoredCredentials takes a salt and iteration count structure and -// provides the values that must be stored by a server to authentication a -// user. These values are what the Server credential lookup function must -// return for a given username. -func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials { - dk := c.getDerivedKeys(kf) - return StoredCredentials{ - KeyFactors: kf, - StoredKey: dk.StoredKey, - ServerKey: dk.ServerKey, - } -} - -func (c *Client) computeKeys(kf KeyFactors) derivedKeys { - h := c.hashGen() - saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen) - clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key")) - - return derivedKeys{ - ClientKey: clientKey, - StoredKey: computeHash(c.hashGen, clientKey), - ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")), - } -} - -func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) { - c.RLock() - defer c.RUnlock() - dk, ok := c.cache[kf] - return dk, ok -} - -func (c *Client) setCache(kf KeyFactors, dk derivedKeys) { - c.Lock() - defer c.Unlock() - c.cache[kf] = dk - return -} diff --git a/vendor/github.com/xdg-go/scram/client_conv.go b/vendor/github.com/xdg-go/scram/client_conv.go deleted file mode 100644 index 8340568..0000000 --- a/vendor/github.com/xdg-go/scram/client_conv.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "encoding/base64" - "errors" - "fmt" - "strings" -) - -type clientState int - -const ( - clientStarting clientState = iota - clientFirst - clientFinal - clientDone -) - -// ClientConversation implements the client-side of an authentication -// conversation with a server. A new conversation must be created for -// each authentication attempt. -type ClientConversation struct { - client *Client - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - minIters int - state clientState - valid bool - gs2 string - nonce string - c1b string - serveSig []byte -} - -// Step takes a string provided from a server (or just an empty string for the -// very first conversation step) and attempts to move the authentication -// conversation forward. It returns a string to be sent to the server or an -// error if the server message is invalid. Calling Step after a conversation -// completes is also an error. -func (cc *ClientConversation) Step(challenge string) (response string, err error) { - switch cc.state { - case clientStarting: - cc.state = clientFirst - response, err = cc.firstMsg() - case clientFirst: - cc.state = clientFinal - response, err = cc.finalMsg(challenge) - case clientFinal: - cc.state = clientDone - response, err = cc.validateServer(challenge) - default: - response, err = "", errors.New("Conversation already completed") - } - return -} - -// Done returns true if the conversation is completed or has errored. -func (cc *ClientConversation) Done() bool { - return cc.state == clientDone -} - -// Valid returns true if the conversation successfully authenticated with the -// server, including counter-validation that the server actually has the -// user's stored credentials. 
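Step's three-phase progression (client-first, client-final, server validation) and the Done/Valid accessors just described are easiest to see with both ends of the conversation in one process. A runnable loopback sketch — KeyFactors, StoredCredentials and the server constructor appear later in this patch; the fixed salt and single-user lookup are illustrative only:

    package main

    import (
        "fmt"

        "github.com/xdg-go/scram"
    )

    func main() {
        client, err := scram.SHA256.NewClient("mulder", "trustno1", "")
        if err != nil {
            panic(err)
        }

        // Derive the server-side values once, as if at provisioning time.
        kf := scram.KeyFactors{Salt: "0123456789abcdef", Iters: 4096}
        creds := client.GetStoredCredentials(kf)

        server, err := scram.SHA256.NewServer(func(string) (scram.StoredCredentials, error) {
            return creds, nil // single-user store, for illustration only
        })
        if err != nil {
            panic(err)
        }

        cconv, sconv := client.NewConversation(), server.NewConversation()

        // The client speaks first (empty challenge); stop once it has
        // validated the server's final message.
        var msg string
        for !cconv.Done() {
            if msg, err = cconv.Step(msg); err != nil {
                panic(err)
            }
            if sconv.Done() {
                break // server already sent its final message
            }
            if msg, err = sconv.Step(msg); err != nil {
                panic(err)
            }
        }

        fmt.Println("client validated server:", cconv.Valid())
        fmt.Println("server validated client:", sconv.Valid())
    }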
-func (cc *ClientConversation) Valid() bool { - return cc.valid -} - -func (cc *ClientConversation) firstMsg() (string, error) { - // Values are cached for use in final message parameters - cc.gs2 = cc.gs2Header() - cc.nonce = cc.client.nonceGen() - cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) - - return cc.gs2 + cc.c1b, nil -} - -func (cc *ClientConversation) finalMsg(s1 string) (string, error) { - msg, err := parseServerFirst(s1) - if err != nil { - return "", err - } - - // Check nonce prefix and update - if !strings.HasPrefix(msg.nonce, cc.nonce) { - return "", errors.New("server nonce did not extend client nonce") - } - cc.nonce = msg.nonce - - // Check iteration count vs minimum - if msg.iters < cc.minIters { - return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) - } - - // Create client-final-message-without-proof - c2wop := fmt.Sprintf( - "c=%s,r=%s", - base64.StdEncoding.EncodeToString([]byte(cc.gs2)), - cc.nonce, - ) - - // Create auth message - authMsg := cc.c1b + "," + s1 + "," + c2wop - - // Get derived keys from client cache - dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) - - // Create proof as clientkey XOR clientsignature - clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) - clientProof := xorBytes(dk.ClientKey, clientSignature) - proof := base64.StdEncoding.EncodeToString(clientProof) - - // Cache ServerSignature for later validation - cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) - - return fmt.Sprintf("%s,p=%s", c2wop, proof), nil -} - -func (cc *ClientConversation) validateServer(s2 string) (string, error) { - msg, err := parseServerFinal(s2) - if err != nil { - return "", err - } - - if len(msg.err) > 0 { - return "", fmt.Errorf("server error: %s", msg.err) - } - - if !hmac.Equal(msg.verifier, cc.serveSig) { - return "", errors.New("server validation failed") - } - - cc.valid = true - return "", nil -} - -func (cc *ClientConversation) gs2Header() string { - if cc.client.authzID == "" { - return "n,," - } - return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) -} diff --git a/vendor/github.com/xdg-go/scram/common.go b/vendor/github.com/xdg-go/scram/common.go deleted file mode 100644 index cb705cb..0000000 --- a/vendor/github.com/xdg-go/scram/common.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "crypto/rand" - "encoding/base64" - "strings" -) - -// NonceGeneratorFcn defines a function that returns a string of high-quality -// random printable ASCII characters EXCLUDING the comma (',') character. The -// default nonce generator provides Base64 encoding of 24 bytes from -// crypto/rand. -type NonceGeneratorFcn func() string - -// derivedKeys collects the three cryptographically derived values -// into one struct for caching. -type derivedKeys struct { - ClientKey []byte - StoredKey []byte - ServerKey []byte -} - -// KeyFactors represent the two server-provided factors needed to compute -// client credentials for authentication. Salt is decoded bytes (i.e. not -// base64), but in string form so that KeyFactors can be used as a map key for -// cached credentials. 
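The NonceGeneratorFcn contract above — printable ASCII, never a comma — is the only constraint on a replacement generator. A sketch of swapping in a longer nonce via the WithNonceGenerator hook shown earlier in client.go:

    package main

    import (
        "crypto/rand"
        "encoding/base64"

        "github.com/xdg-go/scram"
    )

    func main() {
        client, err := scram.SHA256.NewClient("mulder", "trustno1", "")
        if err != nil {
            panic(err)
        }

        // Custom generator: 32 random bytes instead of the default 24.
        // Base64 output satisfies the printable-ASCII, comma-free contract.
        client = client.WithNonceGenerator(func() string {
            raw := make([]byte, 32)
            if _, err := rand.Read(raw); err != nil {
                panic(err)
            }
            return base64.StdEncoding.EncodeToString(raw)
        })
        _ = client.NewConversation()
    }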
-type KeyFactors struct { - Salt string - Iters int -} - -// StoredCredentials are the values that a server must store for a given -// username to allow authentication. They include the salt and iteration -// count, plus the derived values to authenticate a client and for the server -// to authenticate itself back to the client. -// -// NOTE: these are specific to a given hash function. To allow a user to -// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of -// StoredCredentials must be created and stored, one for each hash function. -type StoredCredentials struct { - KeyFactors - StoredKey []byte - ServerKey []byte -} - -// CredentialLookup is a callback to provide StoredCredentials for a given -// username. This is used to configure Server objects. -// -// NOTE: these are specific to a given hash function. The callback provided -// to a Server with a given hash function must provide the corresponding -// StoredCredentials. -type CredentialLookup func(string) (StoredCredentials, error) - -func defaultNonceGenerator() string { - raw := make([]byte, 24) - nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) - rand.Read(raw) - base64.StdEncoding.Encode(nonce, raw) - return string(nonce) -} - -func encodeName(s string) string { - return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) -} - -func decodeName(s string) (string, error) { - // TODO Check for = not followed by 2C or 3D - return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil -} - -func computeHash(hg HashGeneratorFcn, b []byte) []byte { - h := hg() - h.Write(b) - return h.Sum(nil) -} - -func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { - mac := hmac.New(hg, key) - mac.Write(data) - return mac.Sum(nil) -} - -func xorBytes(a, b []byte) []byte { - // TODO check a & b are same length, or just xor to smallest - xor := make([]byte, len(a)) - for i := range a { - xor[i] = a[i] ^ b[i] - } - return xor -} diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go deleted file mode 100644 index 82e8aee..0000000 --- a/vendor/github.com/xdg-go/scram/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package scram provides client and server implementations of the Salted -// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 -// and RFC-7677. -// -// Usage -// -// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that -// are used to construct Client or Server objects. -// -// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) -// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) -// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) -// -// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) -// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) -// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) -// -// These objects are used to construct ClientConversation or -// ServerConversation objects that are used to carry out authentication. 
-package scram diff --git a/vendor/github.com/xdg-go/scram/parse.go b/vendor/github.com/xdg-go/scram/parse.go deleted file mode 100644 index 722f604..0000000 --- a/vendor/github.com/xdg-go/scram/parse.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "encoding/base64" - "errors" - "fmt" - "strconv" - "strings" -) - -type c1Msg struct { - gs2Header string - authzID string - username string - nonce string - c1b string -} - -type c2Msg struct { - cbind []byte - nonce string - proof []byte - c2wop string -} - -type s1Msg struct { - nonce string - salt []byte - iters int -} - -type s2Msg struct { - verifier []byte - err string -} - -func parseField(s, k string) (string, error) { - t := strings.TrimPrefix(s, k+"=") - if t == s { - return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k) - } - return t, nil -} - -func parseGS2Flag(s string) (string, error) { - if s[0] == 'p' { - return "", fmt.Errorf("channel binding requested but not supported") - } - - if s == "n" || s == "y" { - return s, nil - } - - return "", fmt.Errorf("error parsing '%s' for gs2 flag", s) -} - -func parseFieldBase64(s, k string) ([]byte, error) { - raw, err := parseField(s, k) - if err != nil { - return nil, err - } - - dec, err := base64.StdEncoding.DecodeString(raw) - if err != nil { - return nil, err - } - - return dec, nil -} - -func parseFieldInt(s, k string) (int, error) { - raw, err := parseField(s, k) - if err != nil { - return 0, err - } - - num, err := strconv.Atoi(raw) - if err != nil { - return 0, fmt.Errorf("error parsing field '%s': %v", k, err) - } - - return num, nil -} - -func parseClientFirst(c1 string) (msg c1Msg, err error) { - - fields := strings.Split(c1, ",") - if len(fields) < 4 { - err = errors.New("not enough fields in first server message") - return - } - - gs2flag, err := parseGS2Flag(fields[0]) - if err != nil { - return - } - - // 'a' field is optional - if len(fields[1]) > 0 { - msg.authzID, err = parseField(fields[1], "a") - if err != nil { - return - } - } - - // Recombine and save the gs2 header - msg.gs2Header = gs2flag + "," + msg.authzID + "," - - // Check for unsupported extensions field "m". - if strings.HasPrefix(fields[2], "m=") { - err = errors.New("SCRAM message extensions are not supported") - return - } - - msg.username, err = parseField(fields[2], "n") - if err != nil { - return - } - - msg.nonce, err = parseField(fields[3], "r") - if err != nil { - return - } - - msg.c1b = strings.Join(fields[2:], ",") - - return -} - -func parseClientFinal(c2 string) (msg c2Msg, err error) { - fields := strings.Split(c2, ",") - if len(fields) < 3 { - err = errors.New("not enough fields in first server message") - return - } - - msg.cbind, err = parseFieldBase64(fields[0], "c") - if err != nil { - return - } - - msg.nonce, err = parseField(fields[1], "r") - if err != nil { - return - } - - // Extension fields may come between nonce and proof, so we - // grab the *last* fields as proof. - msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p") - if err != nil { - return - } - - msg.c2wop = c2[:strings.LastIndex(c2, ",")] - - return -} - -func parseServerFirst(s1 string) (msg s1Msg, err error) { - - // Check for unsupported extensions field "m". 
- if strings.HasPrefix(s1, "m=") { - err = errors.New("SCRAM message extensions are not supported") - return - } - - fields := strings.Split(s1, ",") - if len(fields) < 3 { - err = errors.New("not enough fields in first server message") - return - } - - msg.nonce, err = parseField(fields[0], "r") - if err != nil { - return - } - - msg.salt, err = parseFieldBase64(fields[1], "s") - if err != nil { - return - } - - msg.iters, err = parseFieldInt(fields[2], "i") - - return -} - -func parseServerFinal(s2 string) (msg s2Msg, err error) { - fields := strings.Split(s2, ",") - - msg.verifier, err = parseFieldBase64(fields[0], "v") - if err == nil { - return - } - - msg.err, err = parseField(fields[0], "e") - - return -} diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go deleted file mode 100644 index a7b3660..0000000 --- a/vendor/github.com/xdg-go/scram/scram.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" - - "github.com/xdg-go/stringprep" -) - -// HashGeneratorFcn abstracts a factory function that returns a hash.Hash -// value to be used for SCRAM operations. Generally, one would use the -// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most -// common forms of SCRAM. -type HashGeneratorFcn func() hash.Hash - -// SHA1 is a function that returns a crypto/sha1 hasher and should be used to -// create Client objects configured for SHA-1 hashing. -var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } - -// SHA256 is a function that returns a crypto/sha256 hasher and should be used -// to create Client objects configured for SHA-256 hashing. -var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } - -// SHA512 is a function that returns a crypto/sha512 hasher and should be used -// to create Client objects configured for SHA-512 hashing. -var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } - -// NewClient constructs a SCRAM client component based on a given hash.Hash -// factory receiver. This constructor will normalize the username, password -// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If -// SASLprep fails, the method returns an error. -func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { - var userprep, passprep, authprep string - var err error - - if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { - return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) - } - if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { - return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) - } - if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { - return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) - } - - return newClient(userprep, passprep, authprep, f), nil -} - -// NewClientUnprepped acts like NewClient, except none of the arguments will -// be normalized via SASLprep. This is not generally recommended, but is -// provided for users that may have custom normalization needs. 
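Because NewClient hangs off the HashGeneratorFcn receiver, any func() hash.Hash can act as a mechanism factory — the SHA1/SHA256/SHA512 package variables above are just such factories. A sketch with SHA3-256, assuming an extra golang.org/x/crypto/sha3 dependency; note SCRAM over SHA3-256 is not a registered SASL mechanism, so this only illustrates the pattern:

    package main

    import (
        "hash"

        "github.com/xdg-go/scram"
        "golang.org/x/crypto/sha3" // assumed extra dependency, illustration only
    )

    // sha3Gen satisfies HashGeneratorFcn the same way the package's own
    // SHA1/SHA256/SHA512 variables do. Not a registered SCRAM mechanism.
    var sha3Gen scram.HashGeneratorFcn = func() hash.Hash { return sha3.New256() }

    func main() {
        client, err := sha3Gen.NewClient("mulder", "trustno1", "")
        if err != nil {
            panic(err)
        }
        _ = client.NewConversation() // drive with Step as in the README example
    }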
-func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { - return newClient(username, password, authzID, f), nil -} - -// NewServer constructs a SCRAM server component based on a given hash.Hash -// factory receiver. To be maximally generic, it uses dependency injection to -// handle credential lookup, which is the process of turning a username string -// into a struct with stored credentials for authentication. -func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { - return newServer(cl, f) -} diff --git a/vendor/github.com/xdg-go/scram/server.go b/vendor/github.com/xdg-go/scram/server.go deleted file mode 100644 index b119b36..0000000 --- a/vendor/github.com/xdg-go/scram/server.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import "sync" - -// Server implements the server side of SCRAM authentication. It holds -// configuration values needed to initialize new server-side conversations. -// Generally, this can be persistent within an application. -type Server struct { - sync.RWMutex - credentialCB CredentialLookup - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn -} - -func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { - return &Server{ - credentialCB: cl, - nonceGen: defaultNonceGenerator, - hashGen: fcn, - }, nil -} - -// WithNonceGenerator replaces the default nonce generator (base64 encoding of -// 24 bytes from crypto/rand) with a custom generator. This is provided for -// testing or for users with custom nonce requirements. -func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { - s.Lock() - defer s.Unlock() - s.nonceGen = ng - return s -} - -// NewConversation constructs a server-side authentication conversation. -// Conversations cannot be reused, so this must be called for each new -// authentication attempt. -func (s *Server) NewConversation() *ServerConversation { - s.RLock() - defer s.RUnlock() - return &ServerConversation{ - nonceGen: s.nonceGen, - hashGen: s.hashGen, - credentialCB: s.credentialCB, - } -} diff --git a/vendor/github.com/xdg-go/scram/server_conv.go b/vendor/github.com/xdg-go/scram/server_conv.go deleted file mode 100644 index 9c8838c..0000000 --- a/vendor/github.com/xdg-go/scram/server_conv.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package scram - -import ( - "crypto/hmac" - "encoding/base64" - "errors" - "fmt" -) - -type serverState int - -const ( - serverFirst serverState = iota - serverFinal - serverDone -) - -// ServerConversation implements the server-side of an authentication -// conversation with a client. A new conversation must be created for -// each authentication attempt. 
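NewServer's dependency injection above leaves credential storage entirely to the caller. A minimal map-backed CredentialLookup sketch; a failed lookup surfaces as the "e=unknown-user" reply visible in firstMsg below:

    package main

    import (
        "fmt"

        "github.com/xdg-go/scram"
    )

    // userStore maps usernames to pre-provisioned SCRAM-SHA-256 values;
    // in a real server this would be a database lookup.
    var userStore = map[string]scram.StoredCredentials{}

    func lookup(username string) (scram.StoredCredentials, error) {
        creds, ok := userStore[username]
        if !ok {
            return scram.StoredCredentials{}, fmt.Errorf("no credentials for %q", username)
        }
        return creds, nil
    }

    func main() {
        server, err := scram.SHA256.NewServer(lookup)
        if err != nil {
            panic(err)
        }
        // One conversation per authentication attempt; its first Step consumes
        // the client-first message and, on a failed lookup, returns the
        // "e=unknown-user" error string to relay back to the client.
        conv := server.NewConversation()
        _ = conv
    }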
-type ServerConversation struct { - nonceGen NonceGeneratorFcn - hashGen HashGeneratorFcn - credentialCB CredentialLookup - state serverState - credential StoredCredentials - valid bool - gs2Header string - username string - authzID string - nonce string - c1b string - s1 string -} - -// Step takes a string provided from a client and attempts to move the -// authentication conversation forward. It returns a string to be sent to the -// client or an error if the client message is invalid. Calling Step after a -// conversation completes is also an error. -func (sc *ServerConversation) Step(challenge string) (response string, err error) { - switch sc.state { - case serverFirst: - sc.state = serverFinal - response, err = sc.firstMsg(challenge) - case serverFinal: - sc.state = serverDone - response, err = sc.finalMsg(challenge) - default: - response, err = "", errors.New("Conversation already completed") - } - return -} - -// Done returns true if the conversation is completed or has errored. -func (sc *ServerConversation) Done() bool { - return sc.state == serverDone -} - -// Valid returns true if the conversation successfully authenticated the -// client. -func (sc *ServerConversation) Valid() bool { - return sc.valid -} - -// Username returns the client-provided username. This is valid to call -// if the first conversation Step() is successful. -func (sc *ServerConversation) Username() string { - return sc.username -} - -// AuthzID returns the (optional) client-provided authorization identity, if -// any. If one was not provided, it returns the empty string. This is valid -// to call if the first conversation Step() is successful. -func (sc *ServerConversation) AuthzID() string { - return sc.authzID -} - -func (sc *ServerConversation) firstMsg(c1 string) (string, error) { - msg, err := parseClientFirst(c1) - if err != nil { - sc.state = serverDone - return "", err - } - - sc.gs2Header = msg.gs2Header - sc.username = msg.username - sc.authzID = msg.authzID - - sc.credential, err = sc.credentialCB(msg.username) - if err != nil { - sc.state = serverDone - return "e=unknown-user", err - } - - sc.nonce = msg.nonce + sc.nonceGen() - sc.c1b = msg.c1b - sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", - sc.nonce, - base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), - sc.credential.Iters, - ) - - return sc.s1, nil -} - -// For errors, returns server error message as well as non-nil error. Callers -// can choose whether to send server error or not. -func (sc *ServerConversation) finalMsg(c2 string) (string, error) { - msg, err := parseClientFinal(c2) - if err != nil { - return "", err - } - - // Check channel binding matches what we expect; in this case, we expect - // just the gs2 header we received as we don't support channel binding - // with a data payload. If we add binding, we need to independently - // compute the header to match here. 
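Seen end to end, Step drives the exchange as a simple ping-pong between the two conversations. A sketch that could sit alongside the previous ones in the same illustrative file (the client-side API is again assumed from client.go):

    // runConversation drives a full SCRAM exchange: the client opens with
    // Step(""), then each side Steps the other's last reply until the
    // server conversation reports Done.
    func runConversation(client *scram.Client, server *scram.Server) (bool, error) {
        cc := client.NewConversation() // assumed API from client.go
        sc := server.NewConversation()

        msg, err := cc.Step("") // client-first
        for err == nil && !sc.Done() {
            if msg, err = sc.Step(msg); err != nil {
                break // server-first, then server-final ("v=...")
            }
            if !sc.Done() {
                msg, err = cc.Step(msg) // client-final, carrying the proof
            }
        }
        // A real client would also Step the server-final "v=..." message
        // to verify the server signature before trusting the session.
        return sc.Valid(), err
    }
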
- if string(msg.cbind) != sc.gs2Header { - return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) - } - - // Check nonce received matches what we sent - if msg.nonce != sc.nonce { - return "e=other-error", errors.New("nonce received did not match nonce sent") - } - - // Create auth message - authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop - - // Retrieve ClientKey from proof and verify it - clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) - clientKey := xorBytes([]byte(msg.proof), clientSignature) - storedKey := computeHash(sc.hashGen, clientKey) - - // Compare with constant-time function - if !hmac.Equal(storedKey, sc.credential.StoredKey) { - return "e=invalid-proof", errors.New("challenge proof invalid") - } - - sc.valid = true - - // Compute and return server verifier - serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) - return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil -} diff --git a/vendor/github.com/xdg-go/stringprep/.gitignore b/vendor/github.com/xdg-go/stringprep/.gitignore deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md deleted file mode 100644 index e06787f..0000000 --- a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ /dev/null @@ -1,29 +0,0 @@ -# CHANGELOG - - -## [v1.0.3] - 2022-03-01 - -### Maintenance - -- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 - - -## [v1.0.2] - 2021-03-27 - -### Maintenance - -- Change minimum Go version to 1.11 - - -## [v1.0.1] - 2021-03-24 - -### Bug Fixes - -- Add go.mod file - - -## [v1.0.0] - 2018-02-21 - -[v1.0.2]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.2 -[v1.0.1]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.1 -[v1.0.0]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.0 diff --git a/vendor/github.com/xdg-go/stringprep/LICENSE b/vendor/github.com/xdg-go/stringprep/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/vendor/github.com/xdg-go/stringprep/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg-go/stringprep/README.md b/vendor/github.com/xdg-go/stringprep/README.md deleted file mode 100644 index 83ea534..0000000 --- a/vendor/github.com/xdg-go/stringprep/README.md +++ /dev/null @@ -1,28 +0,0 @@ -[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/stringprep.svg)](https://pkg.go.dev/github.com/xdg-go/stringprep) -[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/stringprep)](https://goreportcard.com/report/github.com/xdg-go/stringprep) -[![Github Actions](https://github.com/xdg-go/stringprep/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/stringprep/actions/workflows/test.yml) - -# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep - -## Synopsis - -``` - import "github.com/xdg-go/stringprep" - - prepped := stringprep.SASLprep.Prepare("TrustNô1") - -``` - -## Description - -This library provides an implementation of the stringprep algorithm -(RFC-3454) in Go, including all data tables. - -A pre-built SASLprep (RFC-4013) profile is provided as well. - -## Copyright and License - -Copyright 2018 by David A. Golden. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"). You may -obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/stringprep/bidi.go b/vendor/github.com/xdg-go/stringprep/bidi.go deleted file mode 100644 index 6f6d321..0000000 --- a/vendor/github.com/xdg-go/stringprep/bidi.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -var errHasLCat = "BiDi string can't have runes from category L" -var errFirstRune = "BiDi string first rune must have category R or AL" -var errLastRune = "BiDi string last rune must have category R or AL" - -// Check for prohibited characters from table C.8 -func checkBiDiProhibitedRune(s string) error { - for _, r := range s { - if TableC8.Contains(r) { - return Error{Msg: errProhibited, Rune: r} - } - } - return nil -} - -// Check for LCat characters from table D.2 -func checkBiDiLCat(s string) error { - for _, r := range s { - if TableD2.Contains(r) { - return Error{Msg: errHasLCat, Rune: r} - } - } - return nil -} - -// Check first and last characters are in table D.1; requires non-empty string -func checkBadFirstAndLastRandALCat(s string) error { - rs := []rune(s) - if !TableD1.Contains(rs[0]) { - return Error{Msg: errFirstRune, Rune: rs[0]} - } - n := len(rs) - 1 - if !TableD1.Contains(rs[n]) { - return Error{Msg: errLastRune, Rune: rs[n]} - } - return nil -} - -// Look for RandALCat characters from table D.1 -func hasBiDiRandALCat(s string) bool { - for _, r := range s { - if TableD1.Contains(r) { - return true - } - } - return false -} - -// Check that BiDi rules are satisfied ; let empty string pass this rule -func passesBiDiRules(s string) error { - if len(s) == 0 { - return nil - } - if err := checkBiDiProhibitedRune(s); err != nil { - return err - } - if hasBiDiRandALCat(s) { - if err := checkBiDiLCat(s); err != nil { - return err - } - if err := checkBadFirstAndLastRandALCat(s); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/xdg-go/stringprep/doc.go b/vendor/github.com/xdg-go/stringprep/doc.go deleted file mode 100644 index b319e08..0000000 --- a/vendor/github.com/xdg-go/stringprep/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package stringprep provides data tables and algorithms for RFC-3454, -// including errata (as of 2018-02). It also provides a profile for -// SASLprep as defined in RFC-4013. -package stringprep diff --git a/vendor/github.com/xdg-go/stringprep/error.go b/vendor/github.com/xdg-go/stringprep/error.go deleted file mode 100644 index 7403e49..0000000 --- a/vendor/github.com/xdg-go/stringprep/error.go +++ /dev/null @@ -1,14 +0,0 @@ -package stringprep - -import "fmt" - -// Error describes problems encountered during stringprep, including what rune -// was problematic. -type Error struct { - Msg string - Rune rune -} - -func (e Error) Error() string { - return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune) -} diff --git a/vendor/github.com/xdg-go/stringprep/map.go b/vendor/github.com/xdg-go/stringprep/map.go deleted file mode 100644 index e56a0dd..0000000 --- a/vendor/github.com/xdg-go/stringprep/map.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -// Mapping represents a stringprep mapping, from a single rune to zero or more -// runes. 
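These BiDi checks are unexported and surface only through Profile.Prepare, defined below in profile.go. A minimal sketch, assuming the SASLprep profile from this same package, of the category-mixing rule firing:

    package main

    import (
        "fmt"

        "github.com/xdg-go/stringprep"
    )

    func main() {
        // "\u05D0a" mixes a Hebrew rune (RandALCat, table D.1) with a
        // Latin rune (LCat, table D.2), so passesBiDiRules rejects it
        // via checkBiDiLCat.
        _, err := stringprep.SASLprep.Prepare("\u05D0a")
        fmt.Println(err)
        // BiDi string can't have runes from category L (rune: '\u0061')
    }
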
-type Mapping map[rune][]rune - -// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping. -// The ok return value is false if the rune was not found. -func (m Mapping) Map(r rune) (replacement []rune, ok bool) { - rs, ok := m[r] - if !ok { - return nil, false - } - return rs, true -} diff --git a/vendor/github.com/xdg-go/stringprep/profile.go b/vendor/github.com/xdg-go/stringprep/profile.go deleted file mode 100644 index 5a73be9..0000000 --- a/vendor/github.com/xdg-go/stringprep/profile.go +++ /dev/null @@ -1,75 +0,0 @@ -package stringprep - -import ( - "golang.org/x/text/unicode/norm" -) - -// Profile represents a stringprep profile. -type Profile struct { - Mappings []Mapping - Normalize bool - Prohibits []Set - CheckBiDi bool -} - -var errProhibited = "prohibited character" - -// Prepare transforms an input string to an output string following -// the rules defined in the profile as defined by RFC-3454. -func (p Profile) Prepare(s string) (string, error) { - // Optimistically, assume output will be same length as input - temp := make([]rune, 0, len(s)) - - // Apply maps - for _, r := range s { - rs, ok := p.applyMaps(r) - if ok { - temp = append(temp, rs...) - } else { - temp = append(temp, r) - } - } - - // Normalize - var out string - if p.Normalize { - out = norm.NFKC.String(string(temp)) - } else { - out = string(temp) - } - - // Check prohibited - for _, r := range out { - if p.runeIsProhibited(r) { - return "", Error{Msg: errProhibited, Rune: r} - } - } - - // Check BiDi allowed - if p.CheckBiDi { - if err := passesBiDiRules(out); err != nil { - return "", err - } - } - - return out, nil -} - -func (p Profile) applyMaps(r rune) ([]rune, bool) { - for _, m := range p.Mappings { - rs, ok := m.Map(r) - if ok { - return rs, true - } - } - return nil, false -} - -func (p Profile) runeIsProhibited(r rune) bool { - for _, s := range p.Prohibits { - if s.Contains(r) { - return true - } - } - return false -} diff --git a/vendor/github.com/xdg-go/stringprep/saslprep.go b/vendor/github.com/xdg-go/stringprep/saslprep.go deleted file mode 100644 index 4001348..0000000 --- a/vendor/github.com/xdg-go/stringprep/saslprep.go +++ /dev/null @@ -1,52 +0,0 @@ -package stringprep - -var mapNonASCIISpaceToASCIISpace = Mapping{ - 0x00A0: []rune{0x0020}, - 0x1680: []rune{0x0020}, - 0x2000: []rune{0x0020}, - 0x2001: []rune{0x0020}, - 0x2002: []rune{0x0020}, - 0x2003: []rune{0x0020}, - 0x2004: []rune{0x0020}, - 0x2005: []rune{0x0020}, - 0x2006: []rune{0x0020}, - 0x2007: []rune{0x0020}, - 0x2008: []rune{0x0020}, - 0x2009: []rune{0x0020}, - 0x200A: []rune{0x0020}, - 0x200B: []rune{0x0020}, - 0x202F: []rune{0x0020}, - 0x205F: []rune{0x0020}, - 0x3000: []rune{0x0020}, -} - -// SASLprep is a pre-defined stringprep profile for user names and passwords -// as described in RFC-4013. -// -// Because the stringprep distinction between query and stored strings was -// intended for compatibility across profile versions, but SASLprep was never -// updated and is now deprecated, this profile only operates in stored -// strings mode, prohibiting unassigned code points. 
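A sketch of the Prepare pipeline just defined, using the SASLprep profile declared below: the B.1 mapping (tables.go, later in this patch) deletes soft hyphens, the space table above maps U+2000 to an ASCII space, NFKC normalization runs, and nothing in the result is prohibited:

    package main

    import (
        "fmt"

        "github.com/xdg-go/stringprep"
    )

    func main() {
        // U+00AD (soft hyphen) is deleted via TableB1; U+2000 (en quad)
        // becomes U+0020 via mapNonASCIISpaceToASCIISpace.
        out, err := stringprep.SASLprep.Prepare("I\u00ADX \u2000user")
        fmt.Printf("%q %v\n", out, err) // "IX  user" <nil>
    }
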
-var SASLprep Profile = saslprep - -var saslprep = Profile{ - Mappings: []Mapping{ - TableB1, - mapNonASCIISpaceToASCIISpace, - }, - Normalize: true, - Prohibits: []Set{ - TableA1, - TableC1_2, - TableC2_1, - TableC2_2, - TableC3, - TableC4, - TableC5, - TableC6, - TableC7, - TableC8, - TableC9, - }, - CheckBiDi: true, -} diff --git a/vendor/github.com/xdg-go/stringprep/set.go b/vendor/github.com/xdg-go/stringprep/set.go deleted file mode 100644 index c837e28..0000000 --- a/vendor/github.com/xdg-go/stringprep/set.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -import "sort" - -// RuneRange represents a close-ended range of runes: [N,M]. For a range -// consisting of a single rune, N and M will be equal. -type RuneRange [2]rune - -// Contains returns true if a rune is within the bounds of the RuneRange. -func (rr RuneRange) Contains(r rune) bool { - return rr[0] <= r && r <= rr[1] -} - -func (rr RuneRange) isAbove(r rune) bool { - return r <= rr[0] -} - -// Set represents a stringprep data table used to identify runes of a -// particular type. -type Set []RuneRange - -// Contains returns true if a rune is within any of the RuneRanges in the -// Set. -func (s Set) Contains(r rune) bool { - i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) - if i < len(s) && s[i].Contains(r) { - return true - } - return false -} diff --git a/vendor/github.com/xdg-go/stringprep/tables.go b/vendor/github.com/xdg-go/stringprep/tables.go deleted file mode 100644 index c3fc1fa..0000000 --- a/vendor/github.com/xdg-go/stringprep/tables.go +++ /dev/null @@ -1,3215 +0,0 @@ -// Copyright 2018 by David A. Golden. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package stringprep - -var tableA1 = Set{ - RuneRange{0x0221, 0x0221}, - RuneRange{0x0234, 0x024F}, - RuneRange{0x02AE, 0x02AF}, - RuneRange{0x02EF, 0x02FF}, - RuneRange{0x0350, 0x035F}, - RuneRange{0x0370, 0x0373}, - RuneRange{0x0376, 0x0379}, - RuneRange{0x037B, 0x037D}, - RuneRange{0x037F, 0x0383}, - RuneRange{0x038B, 0x038B}, - RuneRange{0x038D, 0x038D}, - RuneRange{0x03A2, 0x03A2}, - RuneRange{0x03CF, 0x03CF}, - RuneRange{0x03F7, 0x03FF}, - RuneRange{0x0487, 0x0487}, - RuneRange{0x04CF, 0x04CF}, - RuneRange{0x04F6, 0x04F7}, - RuneRange{0x04FA, 0x04FF}, - RuneRange{0x0510, 0x0530}, - RuneRange{0x0557, 0x0558}, - RuneRange{0x0560, 0x0560}, - RuneRange{0x0588, 0x0588}, - RuneRange{0x058B, 0x0590}, - RuneRange{0x05A2, 0x05A2}, - RuneRange{0x05BA, 0x05BA}, - RuneRange{0x05C5, 0x05CF}, - RuneRange{0x05EB, 0x05EF}, - RuneRange{0x05F5, 0x060B}, - RuneRange{0x060D, 0x061A}, - RuneRange{0x061C, 0x061E}, - RuneRange{0x0620, 0x0620}, - RuneRange{0x063B, 0x063F}, - RuneRange{0x0656, 0x065F}, - RuneRange{0x06EE, 0x06EF}, - RuneRange{0x06FF, 0x06FF}, - RuneRange{0x070E, 0x070E}, - RuneRange{0x072D, 0x072F}, - RuneRange{0x074B, 0x077F}, - RuneRange{0x07B2, 0x0900}, - RuneRange{0x0904, 0x0904}, - RuneRange{0x093A, 0x093B}, - RuneRange{0x094E, 0x094F}, - RuneRange{0x0955, 0x0957}, - RuneRange{0x0971, 0x0980}, - RuneRange{0x0984, 0x0984}, - RuneRange{0x098D, 0x098E}, - RuneRange{0x0991, 0x0992}, - RuneRange{0x09A9, 0x09A9}, - RuneRange{0x09B1, 0x09B1}, - RuneRange{0x09B3, 0x09B5}, - RuneRange{0x09BA, 0x09BB}, - RuneRange{0x09BD, 0x09BD}, - RuneRange{0x09C5, 0x09C6}, - RuneRange{0x09C9, 0x09CA}, - RuneRange{0x09CE, 0x09D6}, - RuneRange{0x09D8, 0x09DB}, - RuneRange{0x09DE, 0x09DE}, - RuneRange{0x09E4, 0x09E5}, - RuneRange{0x09FB, 0x0A01}, - RuneRange{0x0A03, 0x0A04}, - RuneRange{0x0A0B, 0x0A0E}, - RuneRange{0x0A11, 0x0A12}, - RuneRange{0x0A29, 0x0A29}, - RuneRange{0x0A31, 0x0A31}, - RuneRange{0x0A34, 0x0A34}, - RuneRange{0x0A37, 0x0A37}, - RuneRange{0x0A3A, 0x0A3B}, - RuneRange{0x0A3D, 0x0A3D}, - RuneRange{0x0A43, 0x0A46}, - RuneRange{0x0A49, 0x0A4A}, - RuneRange{0x0A4E, 0x0A58}, - RuneRange{0x0A5D, 0x0A5D}, - RuneRange{0x0A5F, 0x0A65}, - RuneRange{0x0A75, 0x0A80}, - RuneRange{0x0A84, 0x0A84}, - RuneRange{0x0A8C, 0x0A8C}, - RuneRange{0x0A8E, 0x0A8E}, - RuneRange{0x0A92, 0x0A92}, - RuneRange{0x0AA9, 0x0AA9}, - RuneRange{0x0AB1, 0x0AB1}, - RuneRange{0x0AB4, 0x0AB4}, - RuneRange{0x0ABA, 0x0ABB}, - RuneRange{0x0AC6, 0x0AC6}, - RuneRange{0x0ACA, 0x0ACA}, - RuneRange{0x0ACE, 0x0ACF}, - RuneRange{0x0AD1, 0x0ADF}, - RuneRange{0x0AE1, 0x0AE5}, - RuneRange{0x0AF0, 0x0B00}, - RuneRange{0x0B04, 0x0B04}, - RuneRange{0x0B0D, 0x0B0E}, - RuneRange{0x0B11, 0x0B12}, - RuneRange{0x0B29, 0x0B29}, - RuneRange{0x0B31, 0x0B31}, - RuneRange{0x0B34, 0x0B35}, - RuneRange{0x0B3A, 0x0B3B}, - RuneRange{0x0B44, 0x0B46}, - RuneRange{0x0B49, 0x0B4A}, - RuneRange{0x0B4E, 0x0B55}, - RuneRange{0x0B58, 0x0B5B}, - RuneRange{0x0B5E, 0x0B5E}, - RuneRange{0x0B62, 0x0B65}, - RuneRange{0x0B71, 0x0B81}, - RuneRange{0x0B84, 0x0B84}, - RuneRange{0x0B8B, 0x0B8D}, - RuneRange{0x0B91, 0x0B91}, - RuneRange{0x0B96, 0x0B98}, - RuneRange{0x0B9B, 0x0B9B}, - RuneRange{0x0B9D, 0x0B9D}, - RuneRange{0x0BA0, 0x0BA2}, - RuneRange{0x0BA5, 0x0BA7}, - RuneRange{0x0BAB, 0x0BAD}, - RuneRange{0x0BB6, 0x0BB6}, - RuneRange{0x0BBA, 0x0BBD}, - RuneRange{0x0BC3, 0x0BC5}, - RuneRange{0x0BC9, 0x0BC9}, - RuneRange{0x0BCE, 0x0BD6}, - RuneRange{0x0BD8, 0x0BE6}, - RuneRange{0x0BF3, 0x0C00}, - 
RuneRange{0x0C04, 0x0C04}, - RuneRange{0x0C0D, 0x0C0D}, - RuneRange{0x0C11, 0x0C11}, - RuneRange{0x0C29, 0x0C29}, - RuneRange{0x0C34, 0x0C34}, - RuneRange{0x0C3A, 0x0C3D}, - RuneRange{0x0C45, 0x0C45}, - RuneRange{0x0C49, 0x0C49}, - RuneRange{0x0C4E, 0x0C54}, - RuneRange{0x0C57, 0x0C5F}, - RuneRange{0x0C62, 0x0C65}, - RuneRange{0x0C70, 0x0C81}, - RuneRange{0x0C84, 0x0C84}, - RuneRange{0x0C8D, 0x0C8D}, - RuneRange{0x0C91, 0x0C91}, - RuneRange{0x0CA9, 0x0CA9}, - RuneRange{0x0CB4, 0x0CB4}, - RuneRange{0x0CBA, 0x0CBD}, - RuneRange{0x0CC5, 0x0CC5}, - RuneRange{0x0CC9, 0x0CC9}, - RuneRange{0x0CCE, 0x0CD4}, - RuneRange{0x0CD7, 0x0CDD}, - RuneRange{0x0CDF, 0x0CDF}, - RuneRange{0x0CE2, 0x0CE5}, - RuneRange{0x0CF0, 0x0D01}, - RuneRange{0x0D04, 0x0D04}, - RuneRange{0x0D0D, 0x0D0D}, - RuneRange{0x0D11, 0x0D11}, - RuneRange{0x0D29, 0x0D29}, - RuneRange{0x0D3A, 0x0D3D}, - RuneRange{0x0D44, 0x0D45}, - RuneRange{0x0D49, 0x0D49}, - RuneRange{0x0D4E, 0x0D56}, - RuneRange{0x0D58, 0x0D5F}, - RuneRange{0x0D62, 0x0D65}, - RuneRange{0x0D70, 0x0D81}, - RuneRange{0x0D84, 0x0D84}, - RuneRange{0x0D97, 0x0D99}, - RuneRange{0x0DB2, 0x0DB2}, - RuneRange{0x0DBC, 0x0DBC}, - RuneRange{0x0DBE, 0x0DBF}, - RuneRange{0x0DC7, 0x0DC9}, - RuneRange{0x0DCB, 0x0DCE}, - RuneRange{0x0DD5, 0x0DD5}, - RuneRange{0x0DD7, 0x0DD7}, - RuneRange{0x0DE0, 0x0DF1}, - RuneRange{0x0DF5, 0x0E00}, - RuneRange{0x0E3B, 0x0E3E}, - RuneRange{0x0E5C, 0x0E80}, - RuneRange{0x0E83, 0x0E83}, - RuneRange{0x0E85, 0x0E86}, - RuneRange{0x0E89, 0x0E89}, - RuneRange{0x0E8B, 0x0E8C}, - RuneRange{0x0E8E, 0x0E93}, - RuneRange{0x0E98, 0x0E98}, - RuneRange{0x0EA0, 0x0EA0}, - RuneRange{0x0EA4, 0x0EA4}, - RuneRange{0x0EA6, 0x0EA6}, - RuneRange{0x0EA8, 0x0EA9}, - RuneRange{0x0EAC, 0x0EAC}, - RuneRange{0x0EBA, 0x0EBA}, - RuneRange{0x0EBE, 0x0EBF}, - RuneRange{0x0EC5, 0x0EC5}, - RuneRange{0x0EC7, 0x0EC7}, - RuneRange{0x0ECE, 0x0ECF}, - RuneRange{0x0EDA, 0x0EDB}, - RuneRange{0x0EDE, 0x0EFF}, - RuneRange{0x0F48, 0x0F48}, - RuneRange{0x0F6B, 0x0F70}, - RuneRange{0x0F8C, 0x0F8F}, - RuneRange{0x0F98, 0x0F98}, - RuneRange{0x0FBD, 0x0FBD}, - RuneRange{0x0FCD, 0x0FCE}, - RuneRange{0x0FD0, 0x0FFF}, - RuneRange{0x1022, 0x1022}, - RuneRange{0x1028, 0x1028}, - RuneRange{0x102B, 0x102B}, - RuneRange{0x1033, 0x1035}, - RuneRange{0x103A, 0x103F}, - RuneRange{0x105A, 0x109F}, - RuneRange{0x10C6, 0x10CF}, - RuneRange{0x10F9, 0x10FA}, - RuneRange{0x10FC, 0x10FF}, - RuneRange{0x115A, 0x115E}, - RuneRange{0x11A3, 0x11A7}, - RuneRange{0x11FA, 0x11FF}, - RuneRange{0x1207, 0x1207}, - RuneRange{0x1247, 0x1247}, - RuneRange{0x1249, 0x1249}, - RuneRange{0x124E, 0x124F}, - RuneRange{0x1257, 0x1257}, - RuneRange{0x1259, 0x1259}, - RuneRange{0x125E, 0x125F}, - RuneRange{0x1287, 0x1287}, - RuneRange{0x1289, 0x1289}, - RuneRange{0x128E, 0x128F}, - RuneRange{0x12AF, 0x12AF}, - RuneRange{0x12B1, 0x12B1}, - RuneRange{0x12B6, 0x12B7}, - RuneRange{0x12BF, 0x12BF}, - RuneRange{0x12C1, 0x12C1}, - RuneRange{0x12C6, 0x12C7}, - RuneRange{0x12CF, 0x12CF}, - RuneRange{0x12D7, 0x12D7}, - RuneRange{0x12EF, 0x12EF}, - RuneRange{0x130F, 0x130F}, - RuneRange{0x1311, 0x1311}, - RuneRange{0x1316, 0x1317}, - RuneRange{0x131F, 0x131F}, - RuneRange{0x1347, 0x1347}, - RuneRange{0x135B, 0x1360}, - RuneRange{0x137D, 0x139F}, - RuneRange{0x13F5, 0x1400}, - RuneRange{0x1677, 0x167F}, - RuneRange{0x169D, 0x169F}, - RuneRange{0x16F1, 0x16FF}, - RuneRange{0x170D, 0x170D}, - RuneRange{0x1715, 0x171F}, - RuneRange{0x1737, 0x173F}, - RuneRange{0x1754, 0x175F}, - RuneRange{0x176D, 0x176D}, - RuneRange{0x1771, 0x1771}, - 
RuneRange{0x1774, 0x177F}, - RuneRange{0x17DD, 0x17DF}, - RuneRange{0x17EA, 0x17FF}, - RuneRange{0x180F, 0x180F}, - RuneRange{0x181A, 0x181F}, - RuneRange{0x1878, 0x187F}, - RuneRange{0x18AA, 0x1DFF}, - RuneRange{0x1E9C, 0x1E9F}, - RuneRange{0x1EFA, 0x1EFF}, - RuneRange{0x1F16, 0x1F17}, - RuneRange{0x1F1E, 0x1F1F}, - RuneRange{0x1F46, 0x1F47}, - RuneRange{0x1F4E, 0x1F4F}, - RuneRange{0x1F58, 0x1F58}, - RuneRange{0x1F5A, 0x1F5A}, - RuneRange{0x1F5C, 0x1F5C}, - RuneRange{0x1F5E, 0x1F5E}, - RuneRange{0x1F7E, 0x1F7F}, - RuneRange{0x1FB5, 0x1FB5}, - RuneRange{0x1FC5, 0x1FC5}, - RuneRange{0x1FD4, 0x1FD5}, - RuneRange{0x1FDC, 0x1FDC}, - RuneRange{0x1FF0, 0x1FF1}, - RuneRange{0x1FF5, 0x1FF5}, - RuneRange{0x1FFF, 0x1FFF}, - RuneRange{0x2053, 0x2056}, - RuneRange{0x2058, 0x205E}, - RuneRange{0x2064, 0x2069}, - RuneRange{0x2072, 0x2073}, - RuneRange{0x208F, 0x209F}, - RuneRange{0x20B2, 0x20CF}, - RuneRange{0x20EB, 0x20FF}, - RuneRange{0x213B, 0x213C}, - RuneRange{0x214C, 0x2152}, - RuneRange{0x2184, 0x218F}, - RuneRange{0x23CF, 0x23FF}, - RuneRange{0x2427, 0x243F}, - RuneRange{0x244B, 0x245F}, - RuneRange{0x24FF, 0x24FF}, - RuneRange{0x2614, 0x2615}, - RuneRange{0x2618, 0x2618}, - RuneRange{0x267E, 0x267F}, - RuneRange{0x268A, 0x2700}, - RuneRange{0x2705, 0x2705}, - RuneRange{0x270A, 0x270B}, - RuneRange{0x2728, 0x2728}, - RuneRange{0x274C, 0x274C}, - RuneRange{0x274E, 0x274E}, - RuneRange{0x2753, 0x2755}, - RuneRange{0x2757, 0x2757}, - RuneRange{0x275F, 0x2760}, - RuneRange{0x2795, 0x2797}, - RuneRange{0x27B0, 0x27B0}, - RuneRange{0x27BF, 0x27CF}, - RuneRange{0x27EC, 0x27EF}, - RuneRange{0x2B00, 0x2E7F}, - RuneRange{0x2E9A, 0x2E9A}, - RuneRange{0x2EF4, 0x2EFF}, - RuneRange{0x2FD6, 0x2FEF}, - RuneRange{0x2FFC, 0x2FFF}, - RuneRange{0x3040, 0x3040}, - RuneRange{0x3097, 0x3098}, - RuneRange{0x3100, 0x3104}, - RuneRange{0x312D, 0x3130}, - RuneRange{0x318F, 0x318F}, - RuneRange{0x31B8, 0x31EF}, - RuneRange{0x321D, 0x321F}, - RuneRange{0x3244, 0x3250}, - RuneRange{0x327C, 0x327E}, - RuneRange{0x32CC, 0x32CF}, - RuneRange{0x32FF, 0x32FF}, - RuneRange{0x3377, 0x337A}, - RuneRange{0x33DE, 0x33DF}, - RuneRange{0x33FF, 0x33FF}, - RuneRange{0x4DB6, 0x4DFF}, - RuneRange{0x9FA6, 0x9FFF}, - RuneRange{0xA48D, 0xA48F}, - RuneRange{0xA4C7, 0xABFF}, - RuneRange{0xD7A4, 0xD7FF}, - RuneRange{0xFA2E, 0xFA2F}, - RuneRange{0xFA6B, 0xFAFF}, - RuneRange{0xFB07, 0xFB12}, - RuneRange{0xFB18, 0xFB1C}, - RuneRange{0xFB37, 0xFB37}, - RuneRange{0xFB3D, 0xFB3D}, - RuneRange{0xFB3F, 0xFB3F}, - RuneRange{0xFB42, 0xFB42}, - RuneRange{0xFB45, 0xFB45}, - RuneRange{0xFBB2, 0xFBD2}, - RuneRange{0xFD40, 0xFD4F}, - RuneRange{0xFD90, 0xFD91}, - RuneRange{0xFDC8, 0xFDCF}, - RuneRange{0xFDFD, 0xFDFF}, - RuneRange{0xFE10, 0xFE1F}, - RuneRange{0xFE24, 0xFE2F}, - RuneRange{0xFE47, 0xFE48}, - RuneRange{0xFE53, 0xFE53}, - RuneRange{0xFE67, 0xFE67}, - RuneRange{0xFE6C, 0xFE6F}, - RuneRange{0xFE75, 0xFE75}, - RuneRange{0xFEFD, 0xFEFE}, - RuneRange{0xFF00, 0xFF00}, - RuneRange{0xFFBF, 0xFFC1}, - RuneRange{0xFFC8, 0xFFC9}, - RuneRange{0xFFD0, 0xFFD1}, - RuneRange{0xFFD8, 0xFFD9}, - RuneRange{0xFFDD, 0xFFDF}, - RuneRange{0xFFE7, 0xFFE7}, - RuneRange{0xFFEF, 0xFFF8}, - RuneRange{0x10000, 0x102FF}, - RuneRange{0x1031F, 0x1031F}, - RuneRange{0x10324, 0x1032F}, - RuneRange{0x1034B, 0x103FF}, - RuneRange{0x10426, 0x10427}, - RuneRange{0x1044E, 0x1CFFF}, - RuneRange{0x1D0F6, 0x1D0FF}, - RuneRange{0x1D127, 0x1D129}, - RuneRange{0x1D1DE, 0x1D3FF}, - RuneRange{0x1D455, 0x1D455}, - RuneRange{0x1D49D, 0x1D49D}, - RuneRange{0x1D4A0, 0x1D4A1}, - RuneRange{0x1D4A3, 
0x1D4A4}, - RuneRange{0x1D4A7, 0x1D4A8}, - RuneRange{0x1D4AD, 0x1D4AD}, - RuneRange{0x1D4BA, 0x1D4BA}, - RuneRange{0x1D4BC, 0x1D4BC}, - RuneRange{0x1D4C1, 0x1D4C1}, - RuneRange{0x1D4C4, 0x1D4C4}, - RuneRange{0x1D506, 0x1D506}, - RuneRange{0x1D50B, 0x1D50C}, - RuneRange{0x1D515, 0x1D515}, - RuneRange{0x1D51D, 0x1D51D}, - RuneRange{0x1D53A, 0x1D53A}, - RuneRange{0x1D53F, 0x1D53F}, - RuneRange{0x1D545, 0x1D545}, - RuneRange{0x1D547, 0x1D549}, - RuneRange{0x1D551, 0x1D551}, - RuneRange{0x1D6A4, 0x1D6A7}, - RuneRange{0x1D7CA, 0x1D7CD}, - RuneRange{0x1D800, 0x1FFFD}, - RuneRange{0x2A6D7, 0x2F7FF}, - RuneRange{0x2FA1E, 0x2FFFD}, - RuneRange{0x30000, 0x3FFFD}, - RuneRange{0x40000, 0x4FFFD}, - RuneRange{0x50000, 0x5FFFD}, - RuneRange{0x60000, 0x6FFFD}, - RuneRange{0x70000, 0x7FFFD}, - RuneRange{0x80000, 0x8FFFD}, - RuneRange{0x90000, 0x9FFFD}, - RuneRange{0xA0000, 0xAFFFD}, - RuneRange{0xB0000, 0xBFFFD}, - RuneRange{0xC0000, 0xCFFFD}, - RuneRange{0xD0000, 0xDFFFD}, - RuneRange{0xE0000, 0xE0000}, - RuneRange{0xE0002, 0xE001F}, - RuneRange{0xE0080, 0xEFFFD}, -} - -// TableA1 represents RFC-3454 Table A.1. -var TableA1 Set = tableA1 - -var tableB1 = Mapping{ - 0x00AD: []rune{}, // Map to nothing - 0x034F: []rune{}, // Map to nothing - 0x180B: []rune{}, // Map to nothing - 0x180C: []rune{}, // Map to nothing - 0x180D: []rune{}, // Map to nothing - 0x200B: []rune{}, // Map to nothing - 0x200C: []rune{}, // Map to nothing - 0x200D: []rune{}, // Map to nothing - 0x2060: []rune{}, // Map to nothing - 0xFE00: []rune{}, // Map to nothing - 0xFE01: []rune{}, // Map to nothing - 0xFE02: []rune{}, // Map to nothing - 0xFE03: []rune{}, // Map to nothing - 0xFE04: []rune{}, // Map to nothing - 0xFE05: []rune{}, // Map to nothing - 0xFE06: []rune{}, // Map to nothing - 0xFE07: []rune{}, // Map to nothing - 0xFE08: []rune{}, // Map to nothing - 0xFE09: []rune{}, // Map to nothing - 0xFE0A: []rune{}, // Map to nothing - 0xFE0B: []rune{}, // Map to nothing - 0xFE0C: []rune{}, // Map to nothing - 0xFE0D: []rune{}, // Map to nothing - 0xFE0E: []rune{}, // Map to nothing - 0xFE0F: []rune{}, // Map to nothing - 0xFEFF: []rune{}, // Map to nothing -} - -// TableB1 represents RFC-3454 Table B.1. 
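With tableA1 and tableB1 now in view, a sketch of the exported lookups they feed: Set.Contains binary-searches the sorted RuneRanges via sort.Search (set.go, above), and Mapping.Map returns the replacement runes, if any:

    package main

    import (
        "fmt"

        "github.com/xdg-go/stringprep"
    )

    func main() {
        // U+0221 sits in tableA1's first range, so it is unassigned
        // per RFC-3454 table A.1.
        fmt.Println(stringprep.TableA1.Contains('\u0221')) // true

        // U+00AD (soft hyphen) maps to zero runes ("map to nothing").
        repl, ok := stringprep.TableB1.Map('\u00AD')
        fmt.Println(len(repl), ok) // 0 true
    }
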
-var TableB1 Mapping = tableB1 - -var tableB2 = Mapping{ - 0x0041: []rune{0x0061}, // Case map - 0x0042: []rune{0x0062}, // Case map - 0x0043: []rune{0x0063}, // Case map - 0x0044: []rune{0x0064}, // Case map - 0x0045: []rune{0x0065}, // Case map - 0x0046: []rune{0x0066}, // Case map - 0x0047: []rune{0x0067}, // Case map - 0x0048: []rune{0x0068}, // Case map - 0x0049: []rune{0x0069}, // Case map - 0x004A: []rune{0x006A}, // Case map - 0x004B: []rune{0x006B}, // Case map - 0x004C: []rune{0x006C}, // Case map - 0x004D: []rune{0x006D}, // Case map - 0x004E: []rune{0x006E}, // Case map - 0x004F: []rune{0x006F}, // Case map - 0x0050: []rune{0x0070}, // Case map - 0x0051: []rune{0x0071}, // Case map - 0x0052: []rune{0x0072}, // Case map - 0x0053: []rune{0x0073}, // Case map - 0x0054: []rune{0x0074}, // Case map - 0x0055: []rune{0x0075}, // Case map - 0x0056: []rune{0x0076}, // Case map - 0x0057: []rune{0x0077}, // Case map - 0x0058: []rune{0x0078}, // Case map - 0x0059: []rune{0x0079}, // Case map - 0x005A: []rune{0x007A}, // Case map - 0x00B5: []rune{0x03BC}, // Case map - 0x00C0: []rune{0x00E0}, // Case map - 0x00C1: []rune{0x00E1}, // Case map - 0x00C2: []rune{0x00E2}, // Case map - 0x00C3: []rune{0x00E3}, // Case map - 0x00C4: []rune{0x00E4}, // Case map - 0x00C5: []rune{0x00E5}, // Case map - 0x00C6: []rune{0x00E6}, // Case map - 0x00C7: []rune{0x00E7}, // Case map - 0x00C8: []rune{0x00E8}, // Case map - 0x00C9: []rune{0x00E9}, // Case map - 0x00CA: []rune{0x00EA}, // Case map - 0x00CB: []rune{0x00EB}, // Case map - 0x00CC: []rune{0x00EC}, // Case map - 0x00CD: []rune{0x00ED}, // Case map - 0x00CE: []rune{0x00EE}, // Case map - 0x00CF: []rune{0x00EF}, // Case map - 0x00D0: []rune{0x00F0}, // Case map - 0x00D1: []rune{0x00F1}, // Case map - 0x00D2: []rune{0x00F2}, // Case map - 0x00D3: []rune{0x00F3}, // Case map - 0x00D4: []rune{0x00F4}, // Case map - 0x00D5: []rune{0x00F5}, // Case map - 0x00D6: []rune{0x00F6}, // Case map - 0x00D8: []rune{0x00F8}, // Case map - 0x00D9: []rune{0x00F9}, // Case map - 0x00DA: []rune{0x00FA}, // Case map - 0x00DB: []rune{0x00FB}, // Case map - 0x00DC: []rune{0x00FC}, // Case map - 0x00DD: []rune{0x00FD}, // Case map - 0x00DE: []rune{0x00FE}, // Case map - 0x00DF: []rune{0x0073, 0x0073}, // Case map - 0x0100: []rune{0x0101}, // Case map - 0x0102: []rune{0x0103}, // Case map - 0x0104: []rune{0x0105}, // Case map - 0x0106: []rune{0x0107}, // Case map - 0x0108: []rune{0x0109}, // Case map - 0x010A: []rune{0x010B}, // Case map - 0x010C: []rune{0x010D}, // Case map - 0x010E: []rune{0x010F}, // Case map - 0x0110: []rune{0x0111}, // Case map - 0x0112: []rune{0x0113}, // Case map - 0x0114: []rune{0x0115}, // Case map - 0x0116: []rune{0x0117}, // Case map - 0x0118: []rune{0x0119}, // Case map - 0x011A: []rune{0x011B}, // Case map - 0x011C: []rune{0x011D}, // Case map - 0x011E: []rune{0x011F}, // Case map - 0x0120: []rune{0x0121}, // Case map - 0x0122: []rune{0x0123}, // Case map - 0x0124: []rune{0x0125}, // Case map - 0x0126: []rune{0x0127}, // Case map - 0x0128: []rune{0x0129}, // Case map - 0x012A: []rune{0x012B}, // Case map - 0x012C: []rune{0x012D}, // Case map - 0x012E: []rune{0x012F}, // Case map - 0x0130: []rune{0x0069, 0x0307}, // Case map - 0x0132: []rune{0x0133}, // Case map - 0x0134: []rune{0x0135}, // Case map - 0x0136: []rune{0x0137}, // Case map - 0x0139: []rune{0x013A}, // Case map - 0x013B: []rune{0x013C}, // Case map - 0x013D: []rune{0x013E}, // Case map - 0x013F: []rune{0x0140}, // Case map - 0x0141: []rune{0x0142}, // Case map - 0x0143: 
[]rune{0x0144}, // Case map - 0x0145: []rune{0x0146}, // Case map - 0x0147: []rune{0x0148}, // Case map - 0x0149: []rune{0x02BC, 0x006E}, // Case map - 0x014A: []rune{0x014B}, // Case map - 0x014C: []rune{0x014D}, // Case map - 0x014E: []rune{0x014F}, // Case map - 0x0150: []rune{0x0151}, // Case map - 0x0152: []rune{0x0153}, // Case map - 0x0154: []rune{0x0155}, // Case map - 0x0156: []rune{0x0157}, // Case map - 0x0158: []rune{0x0159}, // Case map - 0x015A: []rune{0x015B}, // Case map - 0x015C: []rune{0x015D}, // Case map - 0x015E: []rune{0x015F}, // Case map - 0x0160: []rune{0x0161}, // Case map - 0x0162: []rune{0x0163}, // Case map - 0x0164: []rune{0x0165}, // Case map - 0x0166: []rune{0x0167}, // Case map - 0x0168: []rune{0x0169}, // Case map - 0x016A: []rune{0x016B}, // Case map - 0x016C: []rune{0x016D}, // Case map - 0x016E: []rune{0x016F}, // Case map - 0x0170: []rune{0x0171}, // Case map - 0x0172: []rune{0x0173}, // Case map - 0x0174: []rune{0x0175}, // Case map - 0x0176: []rune{0x0177}, // Case map - 0x0178: []rune{0x00FF}, // Case map - 0x0179: []rune{0x017A}, // Case map - 0x017B: []rune{0x017C}, // Case map - 0x017D: []rune{0x017E}, // Case map - 0x017F: []rune{0x0073}, // Case map - 0x0181: []rune{0x0253}, // Case map - 0x0182: []rune{0x0183}, // Case map - 0x0184: []rune{0x0185}, // Case map - 0x0186: []rune{0x0254}, // Case map - 0x0187: []rune{0x0188}, // Case map - 0x0189: []rune{0x0256}, // Case map - 0x018A: []rune{0x0257}, // Case map - 0x018B: []rune{0x018C}, // Case map - 0x018E: []rune{0x01DD}, // Case map - 0x018F: []rune{0x0259}, // Case map - 0x0190: []rune{0x025B}, // Case map - 0x0191: []rune{0x0192}, // Case map - 0x0193: []rune{0x0260}, // Case map - 0x0194: []rune{0x0263}, // Case map - 0x0196: []rune{0x0269}, // Case map - 0x0197: []rune{0x0268}, // Case map - 0x0198: []rune{0x0199}, // Case map - 0x019C: []rune{0x026F}, // Case map - 0x019D: []rune{0x0272}, // Case map - 0x019F: []rune{0x0275}, // Case map - 0x01A0: []rune{0x01A1}, // Case map - 0x01A2: []rune{0x01A3}, // Case map - 0x01A4: []rune{0x01A5}, // Case map - 0x01A6: []rune{0x0280}, // Case map - 0x01A7: []rune{0x01A8}, // Case map - 0x01A9: []rune{0x0283}, // Case map - 0x01AC: []rune{0x01AD}, // Case map - 0x01AE: []rune{0x0288}, // Case map - 0x01AF: []rune{0x01B0}, // Case map - 0x01B1: []rune{0x028A}, // Case map - 0x01B2: []rune{0x028B}, // Case map - 0x01B3: []rune{0x01B4}, // Case map - 0x01B5: []rune{0x01B6}, // Case map - 0x01B7: []rune{0x0292}, // Case map - 0x01B8: []rune{0x01B9}, // Case map - 0x01BC: []rune{0x01BD}, // Case map - 0x01C4: []rune{0x01C6}, // Case map - 0x01C5: []rune{0x01C6}, // Case map - 0x01C7: []rune{0x01C9}, // Case map - 0x01C8: []rune{0x01C9}, // Case map - 0x01CA: []rune{0x01CC}, // Case map - 0x01CB: []rune{0x01CC}, // Case map - 0x01CD: []rune{0x01CE}, // Case map - 0x01CF: []rune{0x01D0}, // Case map - 0x01D1: []rune{0x01D2}, // Case map - 0x01D3: []rune{0x01D4}, // Case map - 0x01D5: []rune{0x01D6}, // Case map - 0x01D7: []rune{0x01D8}, // Case map - 0x01D9: []rune{0x01DA}, // Case map - 0x01DB: []rune{0x01DC}, // Case map - 0x01DE: []rune{0x01DF}, // Case map - 0x01E0: []rune{0x01E1}, // Case map - 0x01E2: []rune{0x01E3}, // Case map - 0x01E4: []rune{0x01E5}, // Case map - 0x01E6: []rune{0x01E7}, // Case map - 0x01E8: []rune{0x01E9}, // Case map - 0x01EA: []rune{0x01EB}, // Case map - 0x01EC: []rune{0x01ED}, // Case map - 0x01EE: []rune{0x01EF}, // Case map - 0x01F0: []rune{0x006A, 0x030C}, // Case map - 0x01F1: []rune{0x01F3}, // Case map - 0x01F2: 
[]rune{0x01F3}, // Case map - 0x01F4: []rune{0x01F5}, // Case map - 0x01F6: []rune{0x0195}, // Case map - 0x01F7: []rune{0x01BF}, // Case map - 0x01F8: []rune{0x01F9}, // Case map - 0x01FA: []rune{0x01FB}, // Case map - 0x01FC: []rune{0x01FD}, // Case map - 0x01FE: []rune{0x01FF}, // Case map - 0x0200: []rune{0x0201}, // Case map - 0x0202: []rune{0x0203}, // Case map - 0x0204: []rune{0x0205}, // Case map - 0x0206: []rune{0x0207}, // Case map - 0x0208: []rune{0x0209}, // Case map - 0x020A: []rune{0x020B}, // Case map - 0x020C: []rune{0x020D}, // Case map - 0x020E: []rune{0x020F}, // Case map - 0x0210: []rune{0x0211}, // Case map - 0x0212: []rune{0x0213}, // Case map - 0x0214: []rune{0x0215}, // Case map - 0x0216: []rune{0x0217}, // Case map - 0x0218: []rune{0x0219}, // Case map - 0x021A: []rune{0x021B}, // Case map - 0x021C: []rune{0x021D}, // Case map - 0x021E: []rune{0x021F}, // Case map - 0x0220: []rune{0x019E}, // Case map - 0x0222: []rune{0x0223}, // Case map - 0x0224: []rune{0x0225}, // Case map - 0x0226: []rune{0x0227}, // Case map - 0x0228: []rune{0x0229}, // Case map - 0x022A: []rune{0x022B}, // Case map - 0x022C: []rune{0x022D}, // Case map - 0x022E: []rune{0x022F}, // Case map - 0x0230: []rune{0x0231}, // Case map - 0x0232: []rune{0x0233}, // Case map - 0x0345: []rune{0x03B9}, // Case map - 0x037A: []rune{0x0020, 0x03B9}, // Additional folding - 0x0386: []rune{0x03AC}, // Case map - 0x0388: []rune{0x03AD}, // Case map - 0x0389: []rune{0x03AE}, // Case map - 0x038A: []rune{0x03AF}, // Case map - 0x038C: []rune{0x03CC}, // Case map - 0x038E: []rune{0x03CD}, // Case map - 0x038F: []rune{0x03CE}, // Case map - 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x0391: []rune{0x03B1}, // Case map - 0x0392: []rune{0x03B2}, // Case map - 0x0393: []rune{0x03B3}, // Case map - 0x0394: []rune{0x03B4}, // Case map - 0x0395: []rune{0x03B5}, // Case map - 0x0396: []rune{0x03B6}, // Case map - 0x0397: []rune{0x03B7}, // Case map - 0x0398: []rune{0x03B8}, // Case map - 0x0399: []rune{0x03B9}, // Case map - 0x039A: []rune{0x03BA}, // Case map - 0x039B: []rune{0x03BB}, // Case map - 0x039C: []rune{0x03BC}, // Case map - 0x039D: []rune{0x03BD}, // Case map - 0x039E: []rune{0x03BE}, // Case map - 0x039F: []rune{0x03BF}, // Case map - 0x03A0: []rune{0x03C0}, // Case map - 0x03A1: []rune{0x03C1}, // Case map - 0x03A3: []rune{0x03C3}, // Case map - 0x03A4: []rune{0x03C4}, // Case map - 0x03A5: []rune{0x03C5}, // Case map - 0x03A6: []rune{0x03C6}, // Case map - 0x03A7: []rune{0x03C7}, // Case map - 0x03A8: []rune{0x03C8}, // Case map - 0x03A9: []rune{0x03C9}, // Case map - 0x03AA: []rune{0x03CA}, // Case map - 0x03AB: []rune{0x03CB}, // Case map - 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x03C2: []rune{0x03C3}, // Case map - 0x03D0: []rune{0x03B2}, // Case map - 0x03D1: []rune{0x03B8}, // Case map - 0x03D2: []rune{0x03C5}, // Additional folding - 0x03D3: []rune{0x03CD}, // Additional folding - 0x03D4: []rune{0x03CB}, // Additional folding - 0x03D5: []rune{0x03C6}, // Case map - 0x03D6: []rune{0x03C0}, // Case map - 0x03D8: []rune{0x03D9}, // Case map - 0x03DA: []rune{0x03DB}, // Case map - 0x03DC: []rune{0x03DD}, // Case map - 0x03DE: []rune{0x03DF}, // Case map - 0x03E0: []rune{0x03E1}, // Case map - 0x03E2: []rune{0x03E3}, // Case map - 0x03E4: []rune{0x03E5}, // Case map - 0x03E6: []rune{0x03E7}, // Case map - 0x03E8: []rune{0x03E9}, // Case map - 0x03EA: []rune{0x03EB}, // Case map - 0x03EC: []rune{0x03ED}, // Case map - 0x03EE: []rune{0x03EF}, // Case map - 0x03F0: []rune{0x03BA}, 
// Case map - 0x03F1: []rune{0x03C1}, // Case map - 0x03F2: []rune{0x03C3}, // Case map - 0x03F4: []rune{0x03B8}, // Case map - 0x03F5: []rune{0x03B5}, // Case map - 0x0400: []rune{0x0450}, // Case map - 0x0401: []rune{0x0451}, // Case map - 0x0402: []rune{0x0452}, // Case map - 0x0403: []rune{0x0453}, // Case map - 0x0404: []rune{0x0454}, // Case map - 0x0405: []rune{0x0455}, // Case map - 0x0406: []rune{0x0456}, // Case map - 0x0407: []rune{0x0457}, // Case map - 0x0408: []rune{0x0458}, // Case map - 0x0409: []rune{0x0459}, // Case map - 0x040A: []rune{0x045A}, // Case map - 0x040B: []rune{0x045B}, // Case map - 0x040C: []rune{0x045C}, // Case map - 0x040D: []rune{0x045D}, // Case map - 0x040E: []rune{0x045E}, // Case map - 0x040F: []rune{0x045F}, // Case map - 0x0410: []rune{0x0430}, // Case map - 0x0411: []rune{0x0431}, // Case map - 0x0412: []rune{0x0432}, // Case map - 0x0413: []rune{0x0433}, // Case map - 0x0414: []rune{0x0434}, // Case map - 0x0415: []rune{0x0435}, // Case map - 0x0416: []rune{0x0436}, // Case map - 0x0417: []rune{0x0437}, // Case map - 0x0418: []rune{0x0438}, // Case map - 0x0419: []rune{0x0439}, // Case map - 0x041A: []rune{0x043A}, // Case map - 0x041B: []rune{0x043B}, // Case map - 0x041C: []rune{0x043C}, // Case map - 0x041D: []rune{0x043D}, // Case map - 0x041E: []rune{0x043E}, // Case map - 0x041F: []rune{0x043F}, // Case map - 0x0420: []rune{0x0440}, // Case map - 0x0421: []rune{0x0441}, // Case map - 0x0422: []rune{0x0442}, // Case map - 0x0423: []rune{0x0443}, // Case map - 0x0424: []rune{0x0444}, // Case map - 0x0425: []rune{0x0445}, // Case map - 0x0426: []rune{0x0446}, // Case map - 0x0427: []rune{0x0447}, // Case map - 0x0428: []rune{0x0448}, // Case map - 0x0429: []rune{0x0449}, // Case map - 0x042A: []rune{0x044A}, // Case map - 0x042B: []rune{0x044B}, // Case map - 0x042C: []rune{0x044C}, // Case map - 0x042D: []rune{0x044D}, // Case map - 0x042E: []rune{0x044E}, // Case map - 0x042F: []rune{0x044F}, // Case map - 0x0460: []rune{0x0461}, // Case map - 0x0462: []rune{0x0463}, // Case map - 0x0464: []rune{0x0465}, // Case map - 0x0466: []rune{0x0467}, // Case map - 0x0468: []rune{0x0469}, // Case map - 0x046A: []rune{0x046B}, // Case map - 0x046C: []rune{0x046D}, // Case map - 0x046E: []rune{0x046F}, // Case map - 0x0470: []rune{0x0471}, // Case map - 0x0472: []rune{0x0473}, // Case map - 0x0474: []rune{0x0475}, // Case map - 0x0476: []rune{0x0477}, // Case map - 0x0478: []rune{0x0479}, // Case map - 0x047A: []rune{0x047B}, // Case map - 0x047C: []rune{0x047D}, // Case map - 0x047E: []rune{0x047F}, // Case map - 0x0480: []rune{0x0481}, // Case map - 0x048A: []rune{0x048B}, // Case map - 0x048C: []rune{0x048D}, // Case map - 0x048E: []rune{0x048F}, // Case map - 0x0490: []rune{0x0491}, // Case map - 0x0492: []rune{0x0493}, // Case map - 0x0494: []rune{0x0495}, // Case map - 0x0496: []rune{0x0497}, // Case map - 0x0498: []rune{0x0499}, // Case map - 0x049A: []rune{0x049B}, // Case map - 0x049C: []rune{0x049D}, // Case map - 0x049E: []rune{0x049F}, // Case map - 0x04A0: []rune{0x04A1}, // Case map - 0x04A2: []rune{0x04A3}, // Case map - 0x04A4: []rune{0x04A5}, // Case map - 0x04A6: []rune{0x04A7}, // Case map - 0x04A8: []rune{0x04A9}, // Case map - 0x04AA: []rune{0x04AB}, // Case map - 0x04AC: []rune{0x04AD}, // Case map - 0x04AE: []rune{0x04AF}, // Case map - 0x04B0: []rune{0x04B1}, // Case map - 0x04B2: []rune{0x04B3}, // Case map - 0x04B4: []rune{0x04B5}, // Case map - 0x04B6: []rune{0x04B7}, // Case map - 0x04B8: []rune{0x04B9}, // Case map - 
0x04BA: []rune{0x04BB}, // Case map - 0x04BC: []rune{0x04BD}, // Case map - 0x04BE: []rune{0x04BF}, // Case map - 0x04C1: []rune{0x04C2}, // Case map - 0x04C3: []rune{0x04C4}, // Case map - 0x04C5: []rune{0x04C6}, // Case map - 0x04C7: []rune{0x04C8}, // Case map - 0x04C9: []rune{0x04CA}, // Case map - 0x04CB: []rune{0x04CC}, // Case map - 0x04CD: []rune{0x04CE}, // Case map - 0x04D0: []rune{0x04D1}, // Case map - 0x04D2: []rune{0x04D3}, // Case map - 0x04D4: []rune{0x04D5}, // Case map - 0x04D6: []rune{0x04D7}, // Case map - 0x04D8: []rune{0x04D9}, // Case map - 0x04DA: []rune{0x04DB}, // Case map - 0x04DC: []rune{0x04DD}, // Case map - 0x04DE: []rune{0x04DF}, // Case map - 0x04E0: []rune{0x04E1}, // Case map - 0x04E2: []rune{0x04E3}, // Case map - 0x04E4: []rune{0x04E5}, // Case map - 0x04E6: []rune{0x04E7}, // Case map - 0x04E8: []rune{0x04E9}, // Case map - 0x04EA: []rune{0x04EB}, // Case map - 0x04EC: []rune{0x04ED}, // Case map - 0x04EE: []rune{0x04EF}, // Case map - 0x04F0: []rune{0x04F1}, // Case map - 0x04F2: []rune{0x04F3}, // Case map - 0x04F4: []rune{0x04F5}, // Case map - 0x04F8: []rune{0x04F9}, // Case map - 0x0500: []rune{0x0501}, // Case map - 0x0502: []rune{0x0503}, // Case map - 0x0504: []rune{0x0505}, // Case map - 0x0506: []rune{0x0507}, // Case map - 0x0508: []rune{0x0509}, // Case map - 0x050A: []rune{0x050B}, // Case map - 0x050C: []rune{0x050D}, // Case map - 0x050E: []rune{0x050F}, // Case map - 0x0531: []rune{0x0561}, // Case map - 0x0532: []rune{0x0562}, // Case map - 0x0533: []rune{0x0563}, // Case map - 0x0534: []rune{0x0564}, // Case map - 0x0535: []rune{0x0565}, // Case map - 0x0536: []rune{0x0566}, // Case map - 0x0537: []rune{0x0567}, // Case map - 0x0538: []rune{0x0568}, // Case map - 0x0539: []rune{0x0569}, // Case map - 0x053A: []rune{0x056A}, // Case map - 0x053B: []rune{0x056B}, // Case map - 0x053C: []rune{0x056C}, // Case map - 0x053D: []rune{0x056D}, // Case map - 0x053E: []rune{0x056E}, // Case map - 0x053F: []rune{0x056F}, // Case map - 0x0540: []rune{0x0570}, // Case map - 0x0541: []rune{0x0571}, // Case map - 0x0542: []rune{0x0572}, // Case map - 0x0543: []rune{0x0573}, // Case map - 0x0544: []rune{0x0574}, // Case map - 0x0545: []rune{0x0575}, // Case map - 0x0546: []rune{0x0576}, // Case map - 0x0547: []rune{0x0577}, // Case map - 0x0548: []rune{0x0578}, // Case map - 0x0549: []rune{0x0579}, // Case map - 0x054A: []rune{0x057A}, // Case map - 0x054B: []rune{0x057B}, // Case map - 0x054C: []rune{0x057C}, // Case map - 0x054D: []rune{0x057D}, // Case map - 0x054E: []rune{0x057E}, // Case map - 0x054F: []rune{0x057F}, // Case map - 0x0550: []rune{0x0580}, // Case map - 0x0551: []rune{0x0581}, // Case map - 0x0552: []rune{0x0582}, // Case map - 0x0553: []rune{0x0583}, // Case map - 0x0554: []rune{0x0584}, // Case map - 0x0555: []rune{0x0585}, // Case map - 0x0556: []rune{0x0586}, // Case map - 0x0587: []rune{0x0565, 0x0582}, // Case map - 0x1E00: []rune{0x1E01}, // Case map - 0x1E02: []rune{0x1E03}, // Case map - 0x1E04: []rune{0x1E05}, // Case map - 0x1E06: []rune{0x1E07}, // Case map - 0x1E08: []rune{0x1E09}, // Case map - 0x1E0A: []rune{0x1E0B}, // Case map - 0x1E0C: []rune{0x1E0D}, // Case map - 0x1E0E: []rune{0x1E0F}, // Case map - 0x1E10: []rune{0x1E11}, // Case map - 0x1E12: []rune{0x1E13}, // Case map - 0x1E14: []rune{0x1E15}, // Case map - 0x1E16: []rune{0x1E17}, // Case map - 0x1E18: []rune{0x1E19}, // Case map - 0x1E1A: []rune{0x1E1B}, // Case map - 0x1E1C: []rune{0x1E1D}, // Case map - 0x1E1E: []rune{0x1E1F}, // Case map - 0x1E20: 
[]rune{0x1E21}, // Case map
-	[... remaining RFC 3454 Table B.2 rows elided: "Case map" and "Additional folding" entries for U+1E22 through U+1D7BB ...]
-}
-
-// TableB2 represents RFC-3454 Table B.2.
-var TableB2 Mapping = tableB2
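For context: the tables above are plain rune-to-rune-sequence maps, so the stringprep mapping step they support (RFC 3454, section 3) is mechanical — each input rune is replaced by its table entry, or passed through unchanged when the table has none. Below is a minimal sketch of that step, assuming only that Mapping is map[rune][]rune (as the literal syntax implies); the deleted file's actual Mapping type and the vendored package's API are not visible in this hunk and may differ.

package main

import "fmt"

// Mapping mirrors the shape of the tables above: one source rune maps to a
// replacement rune sequence. (Assumed definition; the real type declaration
// is not part of this hunk.)
type Mapping map[rune][]rune

// fold performs the RFC 3454 mapping step: runes with a table entry are
// replaced by that entry's runes; all other runes are kept as-is.
func fold(m Mapping, s string) string {
	out := make([]rune, 0, len(s))
	for _, r := range s {
		if repl, ok := m[r]; ok {
			out = append(out, repl...)
			continue
		}
		out = append(out, r)
	}
	return string(out)
}

func main() {
	// Two entries copied from Table B.2 above: U+1EA0 case-maps to U+1EA1,
	// and U+1E9A folds to the two-rune sequence U+0061 U+02BE.
	demo := Mapping{
		0x1EA0: {0x1EA1},
		0x1E9A: {0x0061, 0x02BE},
	}
	fmt.Println(fold(demo, "\u1EA0\u1E9A")) // Ạẚ -> ạaʾ
}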
-
-var tableB3 = Mapping{
-	0x0041: []rune{0x0061}, // Case map
-	[... RFC 3454 Table B.3 "Case map" rows for U+0042 through U+1F95 elided ...]
-	0x1F96: []rune{0x1F26, 0x03B9}, // Case map
- 0x1F97: []rune{0x1F27, 0x03B9}, // Case map - 0x1F98: []rune{0x1F20, 0x03B9}, // Case map - 0x1F99: []rune{0x1F21, 0x03B9}, // Case map - 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map - 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map - 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map - 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map - 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map - 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map - 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map - 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map - 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map - 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map - 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map - 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map - 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map - 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map - 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map - 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map - 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map - 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map - 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map - 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map - 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map - 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map - 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map - 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map - 0x1FB6: []rune{0x03B1, 0x0342}, // Case map - 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map - 0x1FB8: []rune{0x1FB0}, // Case map - 0x1FB9: []rune{0x1FB1}, // Case map - 0x1FBA: []rune{0x1F70}, // Case map - 0x1FBB: []rune{0x1F71}, // Case map - 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map - 0x1FBE: []rune{0x03B9}, // Case map - 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map - 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map - 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map - 0x1FC6: []rune{0x03B7, 0x0342}, // Case map - 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map - 0x1FC8: []rune{0x1F72}, // Case map - 0x1FC9: []rune{0x1F73}, // Case map - 0x1FCA: []rune{0x1F74}, // Case map - 0x1FCB: []rune{0x1F75}, // Case map - 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map - 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map - 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map - 0x1FD6: []rune{0x03B9, 0x0342}, // Case map - 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map - 0x1FD8: []rune{0x1FD0}, // Case map - 0x1FD9: []rune{0x1FD1}, // Case map - 0x1FDA: []rune{0x1F76}, // Case map - 0x1FDB: []rune{0x1F77}, // Case map - 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map - 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map - 0x1FE4: []rune{0x03C1, 0x0313}, // Case map - 0x1FE6: []rune{0x03C5, 0x0342}, // Case map - 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map - 0x1FE8: []rune{0x1FE0}, // Case map - 0x1FE9: []rune{0x1FE1}, // Case map - 0x1FEA: []rune{0x1F7A}, // Case map - 0x1FEB: []rune{0x1F7B}, // Case map - 0x1FEC: []rune{0x1FE5}, // Case map - 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map - 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map - 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map - 0x1FF6: []rune{0x03C9, 0x0342}, // Case map - 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map - 0x1FF8: []rune{0x1F78}, // Case map - 0x1FF9: []rune{0x1F79}, // Case map - 0x1FFA: []rune{0x1F7C}, // Case map - 0x1FFB: []rune{0x1F7D}, // Case map - 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map - 0x2126: []rune{0x03C9}, // Case map - 0x212A: []rune{0x006B}, // Case map - 0x212B: []rune{0x00E5}, // Case map - 0x2160: []rune{0x2170}, // Case map - 0x2161: []rune{0x2171}, // Case map - 0x2162: []rune{0x2172}, // Case map - 0x2163: []rune{0x2173}, 
// Case map - 0x2164: []rune{0x2174}, // Case map - 0x2165: []rune{0x2175}, // Case map - 0x2166: []rune{0x2176}, // Case map - 0x2167: []rune{0x2177}, // Case map - 0x2168: []rune{0x2178}, // Case map - 0x2169: []rune{0x2179}, // Case map - 0x216A: []rune{0x217A}, // Case map - 0x216B: []rune{0x217B}, // Case map - 0x216C: []rune{0x217C}, // Case map - 0x216D: []rune{0x217D}, // Case map - 0x216E: []rune{0x217E}, // Case map - 0x216F: []rune{0x217F}, // Case map - 0x24B6: []rune{0x24D0}, // Case map - 0x24B7: []rune{0x24D1}, // Case map - 0x24B8: []rune{0x24D2}, // Case map - 0x24B9: []rune{0x24D3}, // Case map - 0x24BA: []rune{0x24D4}, // Case map - 0x24BB: []rune{0x24D5}, // Case map - 0x24BC: []rune{0x24D6}, // Case map - 0x24BD: []rune{0x24D7}, // Case map - 0x24BE: []rune{0x24D8}, // Case map - 0x24BF: []rune{0x24D9}, // Case map - 0x24C0: []rune{0x24DA}, // Case map - 0x24C1: []rune{0x24DB}, // Case map - 0x24C2: []rune{0x24DC}, // Case map - 0x24C3: []rune{0x24DD}, // Case map - 0x24C4: []rune{0x24DE}, // Case map - 0x24C5: []rune{0x24DF}, // Case map - 0x24C6: []rune{0x24E0}, // Case map - 0x24C7: []rune{0x24E1}, // Case map - 0x24C8: []rune{0x24E2}, // Case map - 0x24C9: []rune{0x24E3}, // Case map - 0x24CA: []rune{0x24E4}, // Case map - 0x24CB: []rune{0x24E5}, // Case map - 0x24CC: []rune{0x24E6}, // Case map - 0x24CD: []rune{0x24E7}, // Case map - 0x24CE: []rune{0x24E8}, // Case map - 0x24CF: []rune{0x24E9}, // Case map - 0xFB00: []rune{0x0066, 0x0066}, // Case map - 0xFB01: []rune{0x0066, 0x0069}, // Case map - 0xFB02: []rune{0x0066, 0x006C}, // Case map - 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map - 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map - 0xFB05: []rune{0x0073, 0x0074}, // Case map - 0xFB06: []rune{0x0073, 0x0074}, // Case map - 0xFB13: []rune{0x0574, 0x0576}, // Case map - 0xFB14: []rune{0x0574, 0x0565}, // Case map - 0xFB15: []rune{0x0574, 0x056B}, // Case map - 0xFB16: []rune{0x057E, 0x0576}, // Case map - 0xFB17: []rune{0x0574, 0x056D}, // Case map - 0xFF21: []rune{0xFF41}, // Case map - 0xFF22: []rune{0xFF42}, // Case map - 0xFF23: []rune{0xFF43}, // Case map - 0xFF24: []rune{0xFF44}, // Case map - 0xFF25: []rune{0xFF45}, // Case map - 0xFF26: []rune{0xFF46}, // Case map - 0xFF27: []rune{0xFF47}, // Case map - 0xFF28: []rune{0xFF48}, // Case map - 0xFF29: []rune{0xFF49}, // Case map - 0xFF2A: []rune{0xFF4A}, // Case map - 0xFF2B: []rune{0xFF4B}, // Case map - 0xFF2C: []rune{0xFF4C}, // Case map - 0xFF2D: []rune{0xFF4D}, // Case map - 0xFF2E: []rune{0xFF4E}, // Case map - 0xFF2F: []rune{0xFF4F}, // Case map - 0xFF30: []rune{0xFF50}, // Case map - 0xFF31: []rune{0xFF51}, // Case map - 0xFF32: []rune{0xFF52}, // Case map - 0xFF33: []rune{0xFF53}, // Case map - 0xFF34: []rune{0xFF54}, // Case map - 0xFF35: []rune{0xFF55}, // Case map - 0xFF36: []rune{0xFF56}, // Case map - 0xFF37: []rune{0xFF57}, // Case map - 0xFF38: []rune{0xFF58}, // Case map - 0xFF39: []rune{0xFF59}, // Case map - 0xFF3A: []rune{0xFF5A}, // Case map - 0x10400: []rune{0x10428}, // Case map - 0x10401: []rune{0x10429}, // Case map - 0x10402: []rune{0x1042A}, // Case map - 0x10403: []rune{0x1042B}, // Case map - 0x10404: []rune{0x1042C}, // Case map - 0x10405: []rune{0x1042D}, // Case map - 0x10406: []rune{0x1042E}, // Case map - 0x10407: []rune{0x1042F}, // Case map - 0x10408: []rune{0x10430}, // Case map - 0x10409: []rune{0x10431}, // Case map - 0x1040A: []rune{0x10432}, // Case map - 0x1040B: []rune{0x10433}, // Case map - 0x1040C: []rune{0x10434}, // Case map - 0x1040D: 
[]rune{0x10435}, // Case map - 0x1040E: []rune{0x10436}, // Case map - 0x1040F: []rune{0x10437}, // Case map - 0x10410: []rune{0x10438}, // Case map - 0x10411: []rune{0x10439}, // Case map - 0x10412: []rune{0x1043A}, // Case map - 0x10413: []rune{0x1043B}, // Case map - 0x10414: []rune{0x1043C}, // Case map - 0x10415: []rune{0x1043D}, // Case map - 0x10416: []rune{0x1043E}, // Case map - 0x10417: []rune{0x1043F}, // Case map - 0x10418: []rune{0x10440}, // Case map - 0x10419: []rune{0x10441}, // Case map - 0x1041A: []rune{0x10442}, // Case map - 0x1041B: []rune{0x10443}, // Case map - 0x1041C: []rune{0x10444}, // Case map - 0x1041D: []rune{0x10445}, // Case map - 0x1041E: []rune{0x10446}, // Case map - 0x1041F: []rune{0x10447}, // Case map - 0x10420: []rune{0x10448}, // Case map - 0x10421: []rune{0x10449}, // Case map - 0x10422: []rune{0x1044A}, // Case map - 0x10423: []rune{0x1044B}, // Case map - 0x10424: []rune{0x1044C}, // Case map - 0x10425: []rune{0x1044D}, // Case map -} - -// TableB3 represents RFC-3454 Table B.3. -var TableB3 Mapping = tableB3 - -var tableC1_1 = Set{ - RuneRange{0x0020, 0x0020}, // SPACE -} - -// TableC1_1 represents RFC-3454 Table C.1.1. -var TableC1_1 Set = tableC1_1 - -var tableC1_2 = Set{ - RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE - RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK - RuneRange{0x2000, 0x2000}, // EN QUAD - RuneRange{0x2001, 0x2001}, // EM QUAD - RuneRange{0x2002, 0x2002}, // EN SPACE - RuneRange{0x2003, 0x2003}, // EM SPACE - RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE - RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE - RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE - RuneRange{0x2007, 0x2007}, // FIGURE SPACE - RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE - RuneRange{0x2009, 0x2009}, // THIN SPACE - RuneRange{0x200A, 0x200A}, // HAIR SPACE - RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE - RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE - RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE - RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE -} - -// TableC1_2 represents RFC-3454 Table C.1.2. -var TableC1_2 Set = tableC1_2 - -var tableC2_1 = Set{ - RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS] - RuneRange{0x007F, 0x007F}, // DELETE -} - -// TableC2_1 represents RFC-3454 Table C.2.1. -var TableC2_1 Set = tableC2_1 - -var tableC2_2 = Set{ - RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS] - RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH - RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK - RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR - RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER - RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER - RuneRange{0x2028, 0x2028}, // LINE SEPARATOR - RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR - RuneRange{0x2060, 0x2060}, // WORD JOINER - RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION - RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES - RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR - RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS] - RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE - RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS] - RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS] -} - -// TableC2_2 represents RFC-3454 Table C.2.2. -var TableC2_2 Set = tableC2_2 - -var tableC3 = Set{ - RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0] - RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15] - RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16] -} - -// TableC3 represents RFC-3454 Table C.3. 
-var TableC3 Set = tableC3 - -var tableC4 = Set{ - RuneRange{0xFDD0, 0xFDEF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xFFFE, 0xFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x1FFFE, 0x1FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x2FFFE, 0x2FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x3FFFE, 0x3FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x4FFFE, 0x4FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x5FFFE, 0x5FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x6FFFE, 0x6FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x7FFFE, 0x7FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x8FFFE, 0x8FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x9FFFE, 0x9FFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xAFFFE, 0xAFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xBFFFE, 0xBFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xCFFFE, 0xCFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xDFFFE, 0xDFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xEFFFE, 0xEFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0xFFFFE, 0xFFFFF}, // [NONCHARACTER CODE POINTS] - RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS] -} - -// TableC4 represents RFC-3454 Table C.4. -var TableC4 Set = tableC4 - -var tableC5 = Set{ - RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES] -} - -// TableC5 represents RFC-3454 Table C.5. -var TableC5 Set = tableC5 - -var tableC6 = Set{ - RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR - RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR - RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR - RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER - RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER -} - -// TableC6 represents RFC-3454 Table C.6. -var TableC6 Set = tableC6 - -var tableC7 = Set{ - RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS] -} - -// TableC7 represents RFC-3454 Table C.7. -var TableC7 Set = tableC7 - -var tableC8 = Set{ - RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK - RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK - RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK - RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK - RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING - RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING - RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING - RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE - RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE - RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING - RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING - RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING - RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING - RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES - RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES -} - -// TableC8 represents RFC-3454 Table C.8. -var TableC8 Set = tableC8 - -var tableC9 = Set{ - RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG - RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS] -} - -// TableC9 represents RFC-3454 Table C.9. 
-var TableC9 Set = tableC9 - -var tableD1 = Set{ - RuneRange{0x05BE, 0x05BE}, - RuneRange{0x05C0, 0x05C0}, - RuneRange{0x05C3, 0x05C3}, - RuneRange{0x05D0, 0x05EA}, - RuneRange{0x05F0, 0x05F4}, - RuneRange{0x061B, 0x061B}, - RuneRange{0x061F, 0x061F}, - RuneRange{0x0621, 0x063A}, - RuneRange{0x0640, 0x064A}, - RuneRange{0x066D, 0x066F}, - RuneRange{0x0671, 0x06D5}, - RuneRange{0x06DD, 0x06DD}, - RuneRange{0x06E5, 0x06E6}, - RuneRange{0x06FA, 0x06FE}, - RuneRange{0x0700, 0x070D}, - RuneRange{0x0710, 0x0710}, - RuneRange{0x0712, 0x072C}, - RuneRange{0x0780, 0x07A5}, - RuneRange{0x07B1, 0x07B1}, - RuneRange{0x200F, 0x200F}, - RuneRange{0xFB1D, 0xFB1D}, - RuneRange{0xFB1F, 0xFB28}, - RuneRange{0xFB2A, 0xFB36}, - RuneRange{0xFB38, 0xFB3C}, - RuneRange{0xFB3E, 0xFB3E}, - RuneRange{0xFB40, 0xFB41}, - RuneRange{0xFB43, 0xFB44}, - RuneRange{0xFB46, 0xFBB1}, - RuneRange{0xFBD3, 0xFD3D}, - RuneRange{0xFD50, 0xFD8F}, - RuneRange{0xFD92, 0xFDC7}, - RuneRange{0xFDF0, 0xFDFC}, - RuneRange{0xFE70, 0xFE74}, - RuneRange{0xFE76, 0xFEFC}, -} - -// TableD1 represents RFC-3454 Table D.1. -var TableD1 Set = tableD1 - -var tableD2 = Set{ - RuneRange{0x0041, 0x005A}, - RuneRange{0x0061, 0x007A}, - RuneRange{0x00AA, 0x00AA}, - RuneRange{0x00B5, 0x00B5}, - RuneRange{0x00BA, 0x00BA}, - RuneRange{0x00C0, 0x00D6}, - RuneRange{0x00D8, 0x00F6}, - RuneRange{0x00F8, 0x0220}, - RuneRange{0x0222, 0x0233}, - RuneRange{0x0250, 0x02AD}, - RuneRange{0x02B0, 0x02B8}, - RuneRange{0x02BB, 0x02C1}, - RuneRange{0x02D0, 0x02D1}, - RuneRange{0x02E0, 0x02E4}, - RuneRange{0x02EE, 0x02EE}, - RuneRange{0x037A, 0x037A}, - RuneRange{0x0386, 0x0386}, - RuneRange{0x0388, 0x038A}, - RuneRange{0x038C, 0x038C}, - RuneRange{0x038E, 0x03A1}, - RuneRange{0x03A3, 0x03CE}, - RuneRange{0x03D0, 0x03F5}, - RuneRange{0x0400, 0x0482}, - RuneRange{0x048A, 0x04CE}, - RuneRange{0x04D0, 0x04F5}, - RuneRange{0x04F8, 0x04F9}, - RuneRange{0x0500, 0x050F}, - RuneRange{0x0531, 0x0556}, - RuneRange{0x0559, 0x055F}, - RuneRange{0x0561, 0x0587}, - RuneRange{0x0589, 0x0589}, - RuneRange{0x0903, 0x0903}, - RuneRange{0x0905, 0x0939}, - RuneRange{0x093D, 0x0940}, - RuneRange{0x0949, 0x094C}, - RuneRange{0x0950, 0x0950}, - RuneRange{0x0958, 0x0961}, - RuneRange{0x0964, 0x0970}, - RuneRange{0x0982, 0x0983}, - RuneRange{0x0985, 0x098C}, - RuneRange{0x098F, 0x0990}, - RuneRange{0x0993, 0x09A8}, - RuneRange{0x09AA, 0x09B0}, - RuneRange{0x09B2, 0x09B2}, - RuneRange{0x09B6, 0x09B9}, - RuneRange{0x09BE, 0x09C0}, - RuneRange{0x09C7, 0x09C8}, - RuneRange{0x09CB, 0x09CC}, - RuneRange{0x09D7, 0x09D7}, - RuneRange{0x09DC, 0x09DD}, - RuneRange{0x09DF, 0x09E1}, - RuneRange{0x09E6, 0x09F1}, - RuneRange{0x09F4, 0x09FA}, - RuneRange{0x0A05, 0x0A0A}, - RuneRange{0x0A0F, 0x0A10}, - RuneRange{0x0A13, 0x0A28}, - RuneRange{0x0A2A, 0x0A30}, - RuneRange{0x0A32, 0x0A33}, - RuneRange{0x0A35, 0x0A36}, - RuneRange{0x0A38, 0x0A39}, - RuneRange{0x0A3E, 0x0A40}, - RuneRange{0x0A59, 0x0A5C}, - RuneRange{0x0A5E, 0x0A5E}, - RuneRange{0x0A66, 0x0A6F}, - RuneRange{0x0A72, 0x0A74}, - RuneRange{0x0A83, 0x0A83}, - RuneRange{0x0A85, 0x0A8B}, - RuneRange{0x0A8D, 0x0A8D}, - RuneRange{0x0A8F, 0x0A91}, - RuneRange{0x0A93, 0x0AA8}, - RuneRange{0x0AAA, 0x0AB0}, - RuneRange{0x0AB2, 0x0AB3}, - RuneRange{0x0AB5, 0x0AB9}, - RuneRange{0x0ABD, 0x0AC0}, - RuneRange{0x0AC9, 0x0AC9}, - RuneRange{0x0ACB, 0x0ACC}, - RuneRange{0x0AD0, 0x0AD0}, - RuneRange{0x0AE0, 0x0AE0}, - RuneRange{0x0AE6, 0x0AEF}, - RuneRange{0x0B02, 0x0B03}, - RuneRange{0x0B05, 0x0B0C}, - RuneRange{0x0B0F, 0x0B10}, - RuneRange{0x0B13, 0x0B28}, - 
RuneRange{0x0B2A, 0x0B30}, - RuneRange{0x0B32, 0x0B33}, - RuneRange{0x0B36, 0x0B39}, - RuneRange{0x0B3D, 0x0B3E}, - RuneRange{0x0B40, 0x0B40}, - RuneRange{0x0B47, 0x0B48}, - RuneRange{0x0B4B, 0x0B4C}, - RuneRange{0x0B57, 0x0B57}, - RuneRange{0x0B5C, 0x0B5D}, - RuneRange{0x0B5F, 0x0B61}, - RuneRange{0x0B66, 0x0B70}, - RuneRange{0x0B83, 0x0B83}, - RuneRange{0x0B85, 0x0B8A}, - RuneRange{0x0B8E, 0x0B90}, - RuneRange{0x0B92, 0x0B95}, - RuneRange{0x0B99, 0x0B9A}, - RuneRange{0x0B9C, 0x0B9C}, - RuneRange{0x0B9E, 0x0B9F}, - RuneRange{0x0BA3, 0x0BA4}, - RuneRange{0x0BA8, 0x0BAA}, - RuneRange{0x0BAE, 0x0BB5}, - RuneRange{0x0BB7, 0x0BB9}, - RuneRange{0x0BBE, 0x0BBF}, - RuneRange{0x0BC1, 0x0BC2}, - RuneRange{0x0BC6, 0x0BC8}, - RuneRange{0x0BCA, 0x0BCC}, - RuneRange{0x0BD7, 0x0BD7}, - RuneRange{0x0BE7, 0x0BF2}, - RuneRange{0x0C01, 0x0C03}, - RuneRange{0x0C05, 0x0C0C}, - RuneRange{0x0C0E, 0x0C10}, - RuneRange{0x0C12, 0x0C28}, - RuneRange{0x0C2A, 0x0C33}, - RuneRange{0x0C35, 0x0C39}, - RuneRange{0x0C41, 0x0C44}, - RuneRange{0x0C60, 0x0C61}, - RuneRange{0x0C66, 0x0C6F}, - RuneRange{0x0C82, 0x0C83}, - RuneRange{0x0C85, 0x0C8C}, - RuneRange{0x0C8E, 0x0C90}, - RuneRange{0x0C92, 0x0CA8}, - RuneRange{0x0CAA, 0x0CB3}, - RuneRange{0x0CB5, 0x0CB9}, - RuneRange{0x0CBE, 0x0CBE}, - RuneRange{0x0CC0, 0x0CC4}, - RuneRange{0x0CC7, 0x0CC8}, - RuneRange{0x0CCA, 0x0CCB}, - RuneRange{0x0CD5, 0x0CD6}, - RuneRange{0x0CDE, 0x0CDE}, - RuneRange{0x0CE0, 0x0CE1}, - RuneRange{0x0CE6, 0x0CEF}, - RuneRange{0x0D02, 0x0D03}, - RuneRange{0x0D05, 0x0D0C}, - RuneRange{0x0D0E, 0x0D10}, - RuneRange{0x0D12, 0x0D28}, - RuneRange{0x0D2A, 0x0D39}, - RuneRange{0x0D3E, 0x0D40}, - RuneRange{0x0D46, 0x0D48}, - RuneRange{0x0D4A, 0x0D4C}, - RuneRange{0x0D57, 0x0D57}, - RuneRange{0x0D60, 0x0D61}, - RuneRange{0x0D66, 0x0D6F}, - RuneRange{0x0D82, 0x0D83}, - RuneRange{0x0D85, 0x0D96}, - RuneRange{0x0D9A, 0x0DB1}, - RuneRange{0x0DB3, 0x0DBB}, - RuneRange{0x0DBD, 0x0DBD}, - RuneRange{0x0DC0, 0x0DC6}, - RuneRange{0x0DCF, 0x0DD1}, - RuneRange{0x0DD8, 0x0DDF}, - RuneRange{0x0DF2, 0x0DF4}, - RuneRange{0x0E01, 0x0E30}, - RuneRange{0x0E32, 0x0E33}, - RuneRange{0x0E40, 0x0E46}, - RuneRange{0x0E4F, 0x0E5B}, - RuneRange{0x0E81, 0x0E82}, - RuneRange{0x0E84, 0x0E84}, - RuneRange{0x0E87, 0x0E88}, - RuneRange{0x0E8A, 0x0E8A}, - RuneRange{0x0E8D, 0x0E8D}, - RuneRange{0x0E94, 0x0E97}, - RuneRange{0x0E99, 0x0E9F}, - RuneRange{0x0EA1, 0x0EA3}, - RuneRange{0x0EA5, 0x0EA5}, - RuneRange{0x0EA7, 0x0EA7}, - RuneRange{0x0EAA, 0x0EAB}, - RuneRange{0x0EAD, 0x0EB0}, - RuneRange{0x0EB2, 0x0EB3}, - RuneRange{0x0EBD, 0x0EBD}, - RuneRange{0x0EC0, 0x0EC4}, - RuneRange{0x0EC6, 0x0EC6}, - RuneRange{0x0ED0, 0x0ED9}, - RuneRange{0x0EDC, 0x0EDD}, - RuneRange{0x0F00, 0x0F17}, - RuneRange{0x0F1A, 0x0F34}, - RuneRange{0x0F36, 0x0F36}, - RuneRange{0x0F38, 0x0F38}, - RuneRange{0x0F3E, 0x0F47}, - RuneRange{0x0F49, 0x0F6A}, - RuneRange{0x0F7F, 0x0F7F}, - RuneRange{0x0F85, 0x0F85}, - RuneRange{0x0F88, 0x0F8B}, - RuneRange{0x0FBE, 0x0FC5}, - RuneRange{0x0FC7, 0x0FCC}, - RuneRange{0x0FCF, 0x0FCF}, - RuneRange{0x1000, 0x1021}, - RuneRange{0x1023, 0x1027}, - RuneRange{0x1029, 0x102A}, - RuneRange{0x102C, 0x102C}, - RuneRange{0x1031, 0x1031}, - RuneRange{0x1038, 0x1038}, - RuneRange{0x1040, 0x1057}, - RuneRange{0x10A0, 0x10C5}, - RuneRange{0x10D0, 0x10F8}, - RuneRange{0x10FB, 0x10FB}, - RuneRange{0x1100, 0x1159}, - RuneRange{0x115F, 0x11A2}, - RuneRange{0x11A8, 0x11F9}, - RuneRange{0x1200, 0x1206}, - RuneRange{0x1208, 0x1246}, - RuneRange{0x1248, 0x1248}, - RuneRange{0x124A, 0x124D}, - 
RuneRange{0x1250, 0x1256}, - RuneRange{0x1258, 0x1258}, - RuneRange{0x125A, 0x125D}, - RuneRange{0x1260, 0x1286}, - RuneRange{0x1288, 0x1288}, - RuneRange{0x128A, 0x128D}, - RuneRange{0x1290, 0x12AE}, - RuneRange{0x12B0, 0x12B0}, - RuneRange{0x12B2, 0x12B5}, - RuneRange{0x12B8, 0x12BE}, - RuneRange{0x12C0, 0x12C0}, - RuneRange{0x12C2, 0x12C5}, - RuneRange{0x12C8, 0x12CE}, - RuneRange{0x12D0, 0x12D6}, - RuneRange{0x12D8, 0x12EE}, - RuneRange{0x12F0, 0x130E}, - RuneRange{0x1310, 0x1310}, - RuneRange{0x1312, 0x1315}, - RuneRange{0x1318, 0x131E}, - RuneRange{0x1320, 0x1346}, - RuneRange{0x1348, 0x135A}, - RuneRange{0x1361, 0x137C}, - RuneRange{0x13A0, 0x13F4}, - RuneRange{0x1401, 0x1676}, - RuneRange{0x1681, 0x169A}, - RuneRange{0x16A0, 0x16F0}, - RuneRange{0x1700, 0x170C}, - RuneRange{0x170E, 0x1711}, - RuneRange{0x1720, 0x1731}, - RuneRange{0x1735, 0x1736}, - RuneRange{0x1740, 0x1751}, - RuneRange{0x1760, 0x176C}, - RuneRange{0x176E, 0x1770}, - RuneRange{0x1780, 0x17B6}, - RuneRange{0x17BE, 0x17C5}, - RuneRange{0x17C7, 0x17C8}, - RuneRange{0x17D4, 0x17DA}, - RuneRange{0x17DC, 0x17DC}, - RuneRange{0x17E0, 0x17E9}, - RuneRange{0x1810, 0x1819}, - RuneRange{0x1820, 0x1877}, - RuneRange{0x1880, 0x18A8}, - RuneRange{0x1E00, 0x1E9B}, - RuneRange{0x1EA0, 0x1EF9}, - RuneRange{0x1F00, 0x1F15}, - RuneRange{0x1F18, 0x1F1D}, - RuneRange{0x1F20, 0x1F45}, - RuneRange{0x1F48, 0x1F4D}, - RuneRange{0x1F50, 0x1F57}, - RuneRange{0x1F59, 0x1F59}, - RuneRange{0x1F5B, 0x1F5B}, - RuneRange{0x1F5D, 0x1F5D}, - RuneRange{0x1F5F, 0x1F7D}, - RuneRange{0x1F80, 0x1FB4}, - RuneRange{0x1FB6, 0x1FBC}, - RuneRange{0x1FBE, 0x1FBE}, - RuneRange{0x1FC2, 0x1FC4}, - RuneRange{0x1FC6, 0x1FCC}, - RuneRange{0x1FD0, 0x1FD3}, - RuneRange{0x1FD6, 0x1FDB}, - RuneRange{0x1FE0, 0x1FEC}, - RuneRange{0x1FF2, 0x1FF4}, - RuneRange{0x1FF6, 0x1FFC}, - RuneRange{0x200E, 0x200E}, - RuneRange{0x2071, 0x2071}, - RuneRange{0x207F, 0x207F}, - RuneRange{0x2102, 0x2102}, - RuneRange{0x2107, 0x2107}, - RuneRange{0x210A, 0x2113}, - RuneRange{0x2115, 0x2115}, - RuneRange{0x2119, 0x211D}, - RuneRange{0x2124, 0x2124}, - RuneRange{0x2126, 0x2126}, - RuneRange{0x2128, 0x2128}, - RuneRange{0x212A, 0x212D}, - RuneRange{0x212F, 0x2131}, - RuneRange{0x2133, 0x2139}, - RuneRange{0x213D, 0x213F}, - RuneRange{0x2145, 0x2149}, - RuneRange{0x2160, 0x2183}, - RuneRange{0x2336, 0x237A}, - RuneRange{0x2395, 0x2395}, - RuneRange{0x249C, 0x24E9}, - RuneRange{0x3005, 0x3007}, - RuneRange{0x3021, 0x3029}, - RuneRange{0x3031, 0x3035}, - RuneRange{0x3038, 0x303C}, - RuneRange{0x3041, 0x3096}, - RuneRange{0x309D, 0x309F}, - RuneRange{0x30A1, 0x30FA}, - RuneRange{0x30FC, 0x30FF}, - RuneRange{0x3105, 0x312C}, - RuneRange{0x3131, 0x318E}, - RuneRange{0x3190, 0x31B7}, - RuneRange{0x31F0, 0x321C}, - RuneRange{0x3220, 0x3243}, - RuneRange{0x3260, 0x327B}, - RuneRange{0x327F, 0x32B0}, - RuneRange{0x32C0, 0x32CB}, - RuneRange{0x32D0, 0x32FE}, - RuneRange{0x3300, 0x3376}, - RuneRange{0x337B, 0x33DD}, - RuneRange{0x33E0, 0x33FE}, - RuneRange{0x3400, 0x4DB5}, - RuneRange{0x4E00, 0x9FA5}, - RuneRange{0xA000, 0xA48C}, - RuneRange{0xAC00, 0xD7A3}, - RuneRange{0xD800, 0xFA2D}, - RuneRange{0xFA30, 0xFA6A}, - RuneRange{0xFB00, 0xFB06}, - RuneRange{0xFB13, 0xFB17}, - RuneRange{0xFF21, 0xFF3A}, - RuneRange{0xFF41, 0xFF5A}, - RuneRange{0xFF66, 0xFFBE}, - RuneRange{0xFFC2, 0xFFC7}, - RuneRange{0xFFCA, 0xFFCF}, - RuneRange{0xFFD2, 0xFFD7}, - RuneRange{0xFFDA, 0xFFDC}, - RuneRange{0x10300, 0x1031E}, - RuneRange{0x10320, 0x10323}, - RuneRange{0x10330, 0x1034A}, - RuneRange{0x10400, 0x10425}, - 
RuneRange{0x10428, 0x1044D}, - RuneRange{0x1D000, 0x1D0F5}, - RuneRange{0x1D100, 0x1D126}, - RuneRange{0x1D12A, 0x1D166}, - RuneRange{0x1D16A, 0x1D172}, - RuneRange{0x1D183, 0x1D184}, - RuneRange{0x1D18C, 0x1D1A9}, - RuneRange{0x1D1AE, 0x1D1DD}, - RuneRange{0x1D400, 0x1D454}, - RuneRange{0x1D456, 0x1D49C}, - RuneRange{0x1D49E, 0x1D49F}, - RuneRange{0x1D4A2, 0x1D4A2}, - RuneRange{0x1D4A5, 0x1D4A6}, - RuneRange{0x1D4A9, 0x1D4AC}, - RuneRange{0x1D4AE, 0x1D4B9}, - RuneRange{0x1D4BB, 0x1D4BB}, - RuneRange{0x1D4BD, 0x1D4C0}, - RuneRange{0x1D4C2, 0x1D4C3}, - RuneRange{0x1D4C5, 0x1D505}, - RuneRange{0x1D507, 0x1D50A}, - RuneRange{0x1D50D, 0x1D514}, - RuneRange{0x1D516, 0x1D51C}, - RuneRange{0x1D51E, 0x1D539}, - RuneRange{0x1D53B, 0x1D53E}, - RuneRange{0x1D540, 0x1D544}, - RuneRange{0x1D546, 0x1D546}, - RuneRange{0x1D54A, 0x1D550}, - RuneRange{0x1D552, 0x1D6A3}, - RuneRange{0x1D6A8, 0x1D7C9}, - RuneRange{0x20000, 0x2A6D6}, - RuneRange{0x2F800, 0x2FA1D}, - RuneRange{0xF0000, 0xFFFFD}, - RuneRange{0x100000, 0x10FFFD}, -} - -// TableD2 represents RFC-3454 Table D.2. -var TableD2 Set = tableD2 diff --git a/vendor/github.com/youmark/pkcs8/.gitignore b/vendor/github.com/youmark/pkcs8/.gitignore deleted file mode 100644 index 8365624..0000000 --- a/vendor/github.com/youmark/pkcs8/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/youmark/pkcs8/.travis.yml b/vendor/github.com/youmark/pkcs8/.travis.yml deleted file mode 100644 index 3608f7d..0000000 --- a/vendor/github.com/youmark/pkcs8/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -arch: - - amd64 - - ppc64le -language: go - -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - master - -script: - - go test -v ./... diff --git a/vendor/github.com/youmark/pkcs8/LICENSE b/vendor/github.com/youmark/pkcs8/LICENSE deleted file mode 100644 index c939f44..0000000 --- a/vendor/github.com/youmark/pkcs8/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 youmark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/youmark/pkcs8/README b/vendor/github.com/youmark/pkcs8/README deleted file mode 100644 index 376fcaf..0000000 --- a/vendor/github.com/youmark/pkcs8/README +++ /dev/null @@ -1 +0,0 @@ -pkcs8 package: implement PKCS#8 private key parsing and conversion as defined in RFC5208 and RFC5958 diff --git a/vendor/github.com/youmark/pkcs8/README.md b/vendor/github.com/youmark/pkcs8/README.md deleted file mode 100644 index ef6c762..0000000 --- a/vendor/github.com/youmark/pkcs8/README.md +++ /dev/null @@ -1,22 +0,0 @@ -pkcs8 -=== -OpenSSL can generate private keys in both the "traditional format" and the PKCS#8 format, and newer applications are advised to use the more secure PKCS#8 format. The Go standard crypto package provides a [function](http://golang.org/pkg/crypto/x509/#ParsePKCS8PrivateKey) to parse a private key in PKCS#8 format, but it has a limitation: it can only handle unencrypted PKCS#8 private keys. To use it, the user has to save the private key in a file without encryption, and leaving private keys unprotected on file systems is bad practice. In addition, the Go standard package lacks functions to convert RSA/ECDSA private keys into PKCS#8 format. - -The pkcs8 package fills this gap. It implements functions to process private keys in PKCS#8 format, as defined in [RFC5208](https://tools.ietf.org/html/rfc5208) and [RFC5958](https://tools.ietf.org/html/rfc5958). It can handle both the unencrypted PKCS#8 PrivateKeyInfo format and the EncryptedPrivateKeyInfo format with PKCS#5 (v2.0) algorithms. - - -[**Godoc**](http://godoc.org/github.com/youmark/pkcs8) - -## Installation -Supports Go 1.10+. Release v1.1 is the last release supporting Go 1.9. - -```text -go get github.com/youmark/pkcs8 -``` -## Dependencies -This package depends on the golang.org/x/crypto/pbkdf2 and golang.org/x/crypto/scrypt packages. Use the following commands to retrieve them: -```text -go get golang.org/x/crypto/pbkdf2 -go get golang.org/x/crypto/scrypt -``` -
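For context while reviewing the removal, here is a minimal usage sketch of the vendored API, built on the ParsePKCS8PrivateKey helper defined in pkcs8.go further below. The key file name and passphrase are illustrative placeholders, not values taken from this repository:

```go
package main

import (
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/youmark/pkcs8"
)

func main() {
	// "key.pem" and "passphrase" are placeholder values.
	raw, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	// The password argument is optional; omit it for unencrypted keys.
	key, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte("passphrase"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed private key of type %T\n", key)
}
```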
diff --git a/vendor/github.com/youmark/pkcs8/cipher.go b/vendor/github.com/youmark/pkcs8/cipher.go deleted file mode 100644 index 2946c93..0000000 --- a/vendor/github.com/youmark/pkcs8/cipher.go +++ /dev/null @@ -1,60 +0,0 @@ -package pkcs8 - -import ( - "bytes" - "crypto/cipher" - "encoding/asn1" -) - -type cipherWithBlock struct { - oid asn1.ObjectIdentifier - ivSize int - keySize int - newBlock func(key []byte) (cipher.Block, error) -} - -func (c cipherWithBlock) IVSize() int { - return c.ivSize -} - -func (c cipherWithBlock) KeySize() int { - return c.keySize -} - -func (c cipherWithBlock) OID() asn1.ObjectIdentifier { - return c.oid -} - -func (c cipherWithBlock) Encrypt(key, iv, plaintext []byte) ([]byte, error) { - block, err := c.newBlock(key) - if err != nil { - return nil, err - } - return cbcEncrypt(block, key, iv, plaintext) -} - -func (c cipherWithBlock) Decrypt(key, iv, ciphertext []byte) ([]byte, error) { - block, err := c.newBlock(key) - if err != nil { - return nil, err - } - return cbcDecrypt(block, key, iv, ciphertext) -} - -func cbcEncrypt(block cipher.Block, key, iv, plaintext []byte) ([]byte, error) { - mode := cipher.NewCBCEncrypter(block, iv) - paddingLen := block.BlockSize() - (len(plaintext) % block.BlockSize()) - ciphertext := make([]byte, len(plaintext)+paddingLen) - copy(ciphertext, plaintext) - copy(ciphertext[len(plaintext):], bytes.Repeat([]byte{byte(paddingLen)}, paddingLen)) - mode.CryptBlocks(ciphertext, ciphertext) - return ciphertext, nil -} - -func cbcDecrypt(block cipher.Block, key, iv, ciphertext []byte) ([]byte, error) { - mode := cipher.NewCBCDecrypter(block, iv) - plaintext := make([]byte, len(ciphertext)) - mode.CryptBlocks(plaintext, ciphertext) - // TODO: remove padding - return plaintext, nil -}
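Note that the vendored cbcDecrypt above returns the plaintext with its CBC padding still attached (see the TODO). As a hedged sketch, assuming the PKCS#7-style padding that cbcEncrypt applies and that the standard errors package is imported, the missing unpadding step would look roughly like this; a production implementation would also want a constant-time comparison to avoid padding-oracle leaks:

```go
// removePadding is a hypothetical helper, not part of the vendored file:
// it strips the PKCS#7-style padding that cbcEncrypt appends.
func removePadding(plaintext []byte, blockSize int) ([]byte, error) {
	if len(plaintext) == 0 || len(plaintext)%blockSize != 0 {
		return nil, errors.New("pkcs8: invalid padded plaintext length")
	}
	padLen := int(plaintext[len(plaintext)-1])
	if padLen == 0 || padLen > blockSize {
		return nil, errors.New("pkcs8: invalid padding length")
	}
	// In PKCS#7 padding every pad byte equals the pad length.
	for _, b := range plaintext[len(plaintext)-padLen:] {
		if int(b) != padLen {
			return nil, errors.New("pkcs8: invalid padding byte")
		}
	}
	return plaintext[:len(plaintext)-padLen], nil
}
```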
diff --git a/vendor/github.com/youmark/pkcs8/cipher_3des.go b/vendor/github.com/youmark/pkcs8/cipher_3des.go deleted file mode 100644 index 5629664..0000000 --- a/vendor/github.com/youmark/pkcs8/cipher_3des.go +++ /dev/null @@ -1,24 +0,0 @@ -package pkcs8 - -import ( - "crypto/des" - "encoding/asn1" -) - -var ( - oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) - -func init() { - RegisterCipher(oidDESEDE3CBC, func() Cipher { - return TripleDESCBC - }) -} - -// TripleDESCBC is the 168-bit key 3DES cipher in CBC mode. -var TripleDESCBC = cipherWithBlock{ - ivSize: des.BlockSize, - keySize: 24, - newBlock: des.NewTripleDESCipher, - oid: oidDESEDE3CBC, -} diff --git a/vendor/github.com/youmark/pkcs8/cipher_aes.go b/vendor/github.com/youmark/pkcs8/cipher_aes.go deleted file mode 100644 index c0372d1..0000000 --- a/vendor/github.com/youmark/pkcs8/cipher_aes.go +++ /dev/null @@ -1,84 +0,0 @@ -package pkcs8 - -import ( - "crypto/aes" - "encoding/asn1" -) - -var ( - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - oidAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} - oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22} - oidAES192GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 26} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} -) - -func init() { - RegisterCipher(oidAES128CBC, func() Cipher { - return AES128CBC - }) - RegisterCipher(oidAES128GCM, func() Cipher { - return AES128GCM - }) - RegisterCipher(oidAES192CBC, func() Cipher { - return AES192CBC - }) - RegisterCipher(oidAES192GCM, func() Cipher { - return AES192GCM - }) - RegisterCipher(oidAES256CBC, func() Cipher { - return AES256CBC - }) - RegisterCipher(oidAES256GCM, func() Cipher { - return AES256GCM - }) -} - -// AES128CBC is the 128-bit key AES cipher in CBC mode. -var AES128CBC = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 16, - newBlock: aes.NewCipher, - oid: oidAES128CBC, -} - -// AES128GCM is the 128-bit key AES cipher in GCM mode. -var AES128GCM = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 16, - newBlock: aes.NewCipher, - oid: oidAES128GCM, -} - -// AES192CBC is the 192-bit key AES cipher in CBC mode. -var AES192CBC = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 24, - newBlock: aes.NewCipher, - oid: oidAES192CBC, -} - -// AES192GCM is the 192-bit key AES cipher in GCM mode. -var AES192GCM = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 24, - newBlock: aes.NewCipher, - oid: oidAES192GCM, -} - -// AES256CBC is the 256-bit key AES cipher in CBC mode. -var AES256CBC = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 32, - newBlock: aes.NewCipher, - oid: oidAES256CBC, -} - -// AES256GCM is the 256-bit key AES cipher in GCM mode.
-var AES256GCM = cipherWithBlock{ - ivSize: aes.BlockSize, - keySize: 32, - newBlock: aes.NewCipher, - oid: oidAES256GCM, -} diff --git a/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go b/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go deleted file mode 100644 index 79697dd..0000000 --- a/vendor/github.com/youmark/pkcs8/kdf_pbkdf2.go +++ /dev/null @@ -1,91 +0,0 @@ -package pkcs8 - -import ( - "crypto" - "crypto/sha1" - "crypto/sha256" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "hash" - - "golang.org/x/crypto/pbkdf2" -) - -var ( - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidHMACWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 7} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} -) - -func init() { - RegisterKDF(oidPKCS5PBKDF2, func() KDFParameters { - return new(pbkdf2Params) - }) -} - -func newHashFromPRF(ai pkix.AlgorithmIdentifier) (func() hash.Hash, error) { - switch { - case len(ai.Algorithm) == 0 || ai.Algorithm.Equal(oidHMACWithSHA1): - return sha1.New, nil - case ai.Algorithm.Equal(oidHMACWithSHA256): - return sha256.New, nil - default: - return nil, errors.New("pkcs8: unsupported hash function") - } -} - -func newPRFParamFromHash(h crypto.Hash) (pkix.AlgorithmIdentifier, error) { - switch h { - case crypto.SHA1: - return pkix.AlgorithmIdentifier{ - Algorithm: oidHMACWithSHA1, - Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil - case crypto.SHA256: - return pkix.AlgorithmIdentifier{ - Algorithm: oidHMACWithSHA256, - Parameters: asn1.RawValue{Tag: asn1.TagNull}}, nil - } - return pkix.AlgorithmIdentifier{}, errors.New("pkcs8: unsupported hash function") -} - -type pbkdf2Params struct { - Salt []byte - IterationCount int - PRF pkix.AlgorithmIdentifier `asn1:"optional"` -} - -func (p pbkdf2Params) DeriveKey(password []byte, size int) (key []byte, err error) { - h, err := newHashFromPRF(p.PRF) - if err != nil { - return nil, err - } - return pbkdf2.Key(password, p.Salt, p.IterationCount, size, h), nil -} - -// PBKDF2Opts contains options for the PBKDF2 key derivation function. 
-type PBKDF2Opts struct { - SaltSize int - IterationCount int - HMACHash crypto.Hash -} - -func (p PBKDF2Opts) DeriveKey(password, salt []byte, size int) ( - key []byte, params KDFParameters, err error) { - - key = pbkdf2.Key(password, salt, p.IterationCount, size, p.HMACHash.New) - prfParam, err := newPRFParamFromHash(p.HMACHash) - if err != nil { - return nil, nil, err - } - params = pbkdf2Params{salt, p.IterationCount, prfParam} - return key, params, nil -} - -func (p PBKDF2Opts) GetSaltSize() int { - return p.SaltSize -} - -func (p PBKDF2Opts) OID() asn1.ObjectIdentifier { - return oidPKCS5PBKDF2 -} diff --git a/vendor/github.com/youmark/pkcs8/kdf_scrypt.go b/vendor/github.com/youmark/pkcs8/kdf_scrypt.go deleted file mode 100644 index 36c4f4f..0000000 --- a/vendor/github.com/youmark/pkcs8/kdf_scrypt.go +++ /dev/null @@ -1,62 +0,0 @@ -package pkcs8 - -import ( - "encoding/asn1" - - "golang.org/x/crypto/scrypt" -) - -var ( - oidScrypt = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 4, 11} -) - -func init() { - RegisterKDF(oidScrypt, func() KDFParameters { - return new(scryptParams) - }) -} - -type scryptParams struct { - Salt []byte - CostParameter int - BlockSize int - ParallelizationParameter int -} - -func (p scryptParams) DeriveKey(password []byte, size int) (key []byte, err error) { - return scrypt.Key(password, p.Salt, p.CostParameter, p.BlockSize, - p.ParallelizationParameter, size) -} - -// ScryptOpts contains options for the scrypt key derivation function. -type ScryptOpts struct { - SaltSize int - CostParameter int - BlockSize int - ParallelizationParameter int -} - -func (p ScryptOpts) DeriveKey(password, salt []byte, size int) ( - key []byte, params KDFParameters, err error) { - - key, err = scrypt.Key(password, salt, p.CostParameter, p.BlockSize, - p.ParallelizationParameter, size) - if err != nil { - return nil, nil, err - } - params = scryptParams{ - BlockSize: p.BlockSize, - CostParameter: p.CostParameter, - ParallelizationParameter: p.ParallelizationParameter, - Salt: salt, - } - return key, params, nil -} - -func (p ScryptOpts) GetSaltSize() int { - return p.SaltSize -} - -func (p ScryptOpts) OID() asn1.ObjectIdentifier { - return oidScrypt -}
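PBKDF2Opts and ScryptOpts are the two KDFOpts implementations shipped with the vendored package; either can be plugged into the Opts struct accepted by MarshalPrivateKey (both defined in pkcs8.go below). A hedged sketch, with cost parameters chosen purely for illustration rather than as recommendations:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/youmark/pkcs8"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Encrypt the key under scrypt instead of the PBKDF2 default.
	// These cost parameters are examples, not recommendations.
	opts := &pkcs8.Opts{
		Cipher: pkcs8.AES256CBC,
		KDFOpts: pkcs8.ScryptOpts{
			SaltSize:                 16,
			CostParameter:            1 << 15,
			BlockSize:                8,
			ParallelizationParameter: 1,
		},
	}
	der, err := pkcs8.MarshalPrivateKey(priv, []byte("passphrase"), opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("encrypted PKCS#8 blob: %d bytes", len(der))
}
```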
diff --git a/vendor/github.com/youmark/pkcs8/pkcs8.go b/vendor/github.com/youmark/pkcs8/pkcs8.go deleted file mode 100644 index f27f627..0000000 --- a/vendor/github.com/youmark/pkcs8/pkcs8.go +++ /dev/null @@ -1,309 +0,0 @@ -// Package pkcs8 implements functions to parse and convert private keys in PKCS#8 format, as defined in RFC5208 and RFC5958 -package pkcs8 - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" -) - -// DefaultOpts are the default options for encrypting a key if none are given. -// The defaults can be changed by the library user. -var DefaultOpts = &Opts{ - Cipher: AES256CBC, - KDFOpts: PBKDF2Opts{ - SaltSize: 8, - IterationCount: 10000, - HMACHash: crypto.SHA256, - }, -} - -// KDFOpts contains options for a key derivation function. -// An implementation of this interface must be specified when encrypting a PKCS#8 key. -type KDFOpts interface { - // DeriveKey derives a key of size bytes from the given password and salt. - // It returns the key and the ASN.1-encodable parameters used. - DeriveKey(password, salt []byte, size int) (key []byte, params KDFParameters, err error) - // GetSaltSize returns the salt size specified. - GetSaltSize() int - // OID returns the OID of the KDF specified. - OID() asn1.ObjectIdentifier -} - -// KDFParameters contains parameters (salt, etc.) for a key derivation function. -// It must be an ASN.1-decodable structure. -// An implementation of this interface is created when decoding an encrypted PKCS#8 key. -type KDFParameters interface { - // DeriveKey derives a key of size bytes from the given password. - // It uses the salt from the decoded parameters. - DeriveKey(password []byte, size int) (key []byte, err error) -} - -var kdfs = make(map[string]func() KDFParameters) - -// RegisterKDF registers a function that returns a new instance of the given KDF -// parameters. This allows the library to support client-provided KDFs. -func RegisterKDF(oid asn1.ObjectIdentifier, params func() KDFParameters) { - kdfs[oid.String()] = params -} - -// Cipher represents a cipher for encrypting the key material. -type Cipher interface { - // IVSize returns the IV size of the cipher, in bytes. - IVSize() int - // KeySize returns the key size of the cipher, in bytes. - KeySize() int - // Encrypt encrypts the key material. - Encrypt(key, iv, plaintext []byte) ([]byte, error) - // Decrypt decrypts the key material. - Decrypt(key, iv, ciphertext []byte) ([]byte, error) - // OID returns the OID of the cipher specified. - OID() asn1.ObjectIdentifier -} - -var ciphers = make(map[string]func() Cipher) - -// RegisterCipher registers a function that returns a new instance of the given -// cipher. This allows the library to support client-provided ciphers. -func RegisterCipher(oid asn1.ObjectIdentifier, cipher func() Cipher) { - ciphers[oid.String()] = cipher -} - -// Opts contains options for encrypting a PKCS#8 key. -type Opts struct { - Cipher Cipher - KDFOpts KDFOpts -} - -// Unencrypted PKCS#8 -var ( - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} -) - -type encryptedPrivateKeyInfo struct { - EncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedData []byte -} - -type pbes2Params struct { - KeyDerivationFunc pkix.AlgorithmIdentifier - EncryptionScheme pkix.AlgorithmIdentifier -} - -type privateKeyInfo struct { - Version int - PrivateKeyAlgorithm pkix.AlgorithmIdentifier - PrivateKey []byte -} - -func parseKeyDerivationFunc(keyDerivationFunc pkix.AlgorithmIdentifier) (KDFParameters, error) { - oid := keyDerivationFunc.Algorithm.String() - newParams, ok := kdfs[oid] - if !ok { - return nil, fmt.Errorf("pkcs8: unsupported KDF (OID: %s)", oid) - } - params := newParams() - _, err := asn1.Unmarshal(keyDerivationFunc.Parameters.FullBytes, params) - if err != nil { - return nil, errors.New("pkcs8: invalid KDF parameters") - } - return params, nil -} - -func parseEncryptionScheme(encryptionScheme pkix.AlgorithmIdentifier) (Cipher, []byte, error) { - oid := encryptionScheme.Algorithm.String() - newCipher, ok := ciphers[oid] - if !ok { - return nil, nil, fmt.Errorf("pkcs8: unsupported cipher (OID: %s)", oid) - } - cipher := newCipher() - var iv []byte - if _, err := asn1.Unmarshal(encryptionScheme.Parameters.FullBytes, &iv); err != nil { - return nil, nil, errors.New("pkcs8: invalid cipher parameters") - } - return cipher, iv, nil -} - -// ParsePrivateKey parses a DER-encoded PKCS#8 private key. -// Password can be nil. -// This is equivalent to ParsePKCS8PrivateKey.
-func ParsePrivateKey(der []byte, password []byte) (interface{}, KDFParameters, error) { - // No password provided, assume the private key is unencrypted - if len(password) == 0 { - privateKey, err := x509.ParsePKCS8PrivateKey(der) - return privateKey, nil, err - } - - // Use the password provided to decrypt the private key - var privKey encryptedPrivateKeyInfo - if _, err := asn1.Unmarshal(der, &privKey); err != nil { - return nil, nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") - } - - if !privKey.EncryptionAlgorithm.Algorithm.Equal(oidPBES2) { - return nil, nil, errors.New("pkcs8: only PBES2 supported") - } - - var params pbes2Params - if _, err := asn1.Unmarshal(privKey.EncryptionAlgorithm.Parameters.FullBytes, &params); err != nil { - return nil, nil, errors.New("pkcs8: invalid PBES2 parameters") - } - - cipher, iv, err := parseEncryptionScheme(params.EncryptionScheme) - if err != nil { - return nil, nil, err - } - - kdfParams, err := parseKeyDerivationFunc(params.KeyDerivationFunc) - if err != nil { - return nil, nil, err - } - - keySize := cipher.KeySize() - symkey, err := kdfParams.DeriveKey(password, keySize) - if err != nil { - return nil, nil, err - } - - encryptedKey := privKey.EncryptedData - decryptedKey, err := cipher.Decrypt(symkey, iv, encryptedKey) - if err != nil { - return nil, nil, err - } - - key, err := x509.ParsePKCS8PrivateKey(decryptedKey) - if err != nil { - return nil, nil, errors.New("pkcs8: incorrect password") - } - return key, kdfParams, nil -} - -// MarshalPrivateKey encodes a private key into DER-encoded PKCS#8 with the given options. -// Password can be nil. -func MarshalPrivateKey(priv interface{}, password []byte, opts *Opts) ([]byte, error) { - if len(password) == 0 { - return x509.MarshalPKCS8PrivateKey(priv) - } - - if opts == nil { - opts = DefaultOpts - } - - // Convert private key into PKCS8 format - pkey, err := x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - - encAlg := opts.Cipher - salt := make([]byte, opts.KDFOpts.GetSaltSize()) - _, err = rand.Read(salt) - if err != nil { - return nil, err - } - iv := make([]byte, encAlg.IVSize()) - _, err = rand.Read(iv) - if err != nil { - return nil, err - } - key, kdfParams, err := opts.KDFOpts.DeriveKey(password, salt, encAlg.KeySize()) - if err != nil { - return nil, err - } - - encryptedKey, err := encAlg.Encrypt(key, iv, pkey) - if err != nil { - return nil, err - } - - marshalledParams, err := asn1.Marshal(kdfParams) - if err != nil { - return nil, err - } - keyDerivationFunc := pkix.AlgorithmIdentifier{ - Algorithm: opts.KDFOpts.OID(), - Parameters: asn1.RawValue{FullBytes: marshalledParams}, - } - marshalledIV, err := asn1.Marshal(iv) - if err != nil { - return nil, err - } - encryptionScheme := pkix.AlgorithmIdentifier{ - Algorithm: encAlg.OID(), - Parameters: asn1.RawValue{FullBytes: marshalledIV}, - } - - encryptionAlgorithmParams := pbes2Params{ - EncryptionScheme: encryptionScheme, - KeyDerivationFunc: keyDerivationFunc, - } - marshalledEncryptionAlgorithmParams, err := asn1.Marshal(encryptionAlgorithmParams) - if err != nil { - return nil, err - } - encryptionAlgorithm := pkix.AlgorithmIdentifier{ - Algorithm: oidPBES2, - Parameters: asn1.RawValue{FullBytes: marshalledEncryptionAlgorithmParams}, - } - - encryptedPkey := encryptedPrivateKeyInfo{ - EncryptionAlgorithm: encryptionAlgorithm, - EncryptedData: encryptedKey, - } - - return asn1.Marshal(encryptedPkey) -} - -// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format.
To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { - var password []byte - if len(v) > 0 { - password = v[0] - } - privateKey, _, err := ParsePrivateKey(der, password) - return privateKey, err -} - -// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("key block is not of type RSA") - } - return typedKey, nil -} - -// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. -func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { - key, err := ParsePKCS8PrivateKey(der, v...) - if err != nil { - return nil, err - } - typedKey, ok := key.(*ecdsa.PrivateKey) - if !ok { - return nil, errors.New("key block is not of type ECDSA") - } - return typedKey, nil -} - -// ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format. -// To encrypt the private key, the password of []byte type should be provided as the second parameter. -// -// The only supported key types are RSA and ECDSA (*rsa.PrivateKey or *ecdsa.PrivateKey for priv) -func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) { - var password []byte - if len(v) > 0 { - password = v[0] - } - return MarshalPrivateKey(priv, password, nil) -} diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/go.mongodb.org/mongo-driver/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d8185..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. 
-type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 4e24f9e..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. -func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 098ed69..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
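The D, E, M, and A aliases removed above distinguish ordered from unordered BSON documents. A small sketch of the difference (field names are illustrative):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// D preserves element order, which matters for command documents.
	cmd := bson.D{{"ping", 1}}

	// M is unordered; use it when element order is irrelevant.
	filter := bson.M{"status": "active", "pi": 3.14159}

	// A is an ordered array that can mix plain values and documents.
	arr := bson.A{"bar", 3.14159, bson.D{{"qux", 12345}}}

	// Decoding into a D keeps the original element order intact.
	raw, _ := bson.Marshal(cmd)
	var ordered bson.D
	fmt.Println(bson.Unmarshal(raw, &ordered), ordered, filter, arr)
}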
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. -type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. 
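The Marshaler and Unmarshaler interfaces described in the hunk above let a type control its own BSON representation. A sketch with a hypothetical Celsius type (the field layout is an assumption, not part of the vendored code):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Celsius is a hypothetical type that stores itself as a small BSON document.
type Celsius float64

// MarshalBSON satisfies Marshaler: it must return a valid BSON document.
func (c Celsius) MarshalBSON() ([]byte, error) {
	return bson.Marshal(bson.D{{"unit", "C"}, {"value", float64(c)}})
}

// UnmarshalBSON satisfies Unmarshaler; the input bytes can be assumed valid.
func (c *Celsius) UnmarshalBSON(data []byte) error {
	var doc struct {
		Value float64 `bson:"value"`
	}
	if err := bson.Unmarshal(data, &doc); err != nil {
		return err
	}
	*c = Celsius(doc.Value)
	return nil
}

func main() {
	raw, _ := bson.Marshal(bson.D{{"temp", Celsius(21.5)}})
	var out struct{ Temp Celsius }
	fmt.Println(bson.Unmarshal(raw, &out), out.Temp)
}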
-type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - MinSize bool -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - Truncate bool - - // Ancestor is the type of a containing document. This is mainly used to determine what type - // should be used when decoding an embedded document into an empty interface. For example, if - // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - // will be decoded into a bson.M. - // - // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is - // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - // error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type -} - -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is the interface that groups the methods to encode and decode -// values. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. -type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. -func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. -type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. 
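ValueEncoderFunc, shown above, adapts an ordinary function to the ValueEncoder interface so it can be registered for a type. A sketch under stated assumptions (the secret type and its redaction policy are hypothetical; the registration calls are the public RegistryBuilder API):

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// secret is a hypothetical type that should always encode as a redacted string.
type secret string

func main() {
	tSecret := reflect.TypeOf(secret(""))

	// Wrap a plain function as a ValueEncoder via the adapter type above.
	enc := bsoncodec.ValueEncoderFunc(func(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
		if !val.IsValid() || val.Type() != tSecret {
			return bsoncodec.ValueEncoderError{Name: "SecretEncodeValue", Types: []reflect.Type{tSecret}, Received: val}
		}
		return vw.WriteString("<redacted>")
	})

	// Register the stock codecs plus the custom encoder, then build a registry.
	rb := bsoncodec.NewRegistryBuilder()
	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
	rb.RegisterTypeEncoder(tSecret, enc)
	_ = rb.Build()
}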
-type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
-
-// DecodeValue implements the ValueDecoder interface.
-func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	return fn(dc, vr, val)
-}
-
-// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
-type typeDecoder interface {
-	decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
-}
-
-// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
-type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
-
-func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
-	return fn(dc, vr, t)
-}
-
-// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
-type decodeAdapter struct {
-	ValueDecoderFunc
-	typeDecoderFunc
-}
-
-var _ ValueDecoder = decodeAdapter{}
-var _ typeDecoder = decodeAdapter{}
-
-// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
-// t and calls decoder.DecodeValue on it.
-func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
-	td, _ := decoder.(typeDecoder)
-	return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
-}
-
-func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
-	if td != nil {
-		val, err := td.decodeType(dc, vr, t)
-		if err == nil && convert && val.Type() != t {
-			// This conversion step is necessary for slices and maps. If a user declares variables like:
-			//
-			// type myBool bool
-			// var m map[string]myBool
-			//
-			// and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
-			// because we'll try to assign a value of type bool to one of type myBool.
-			val = val.Convert(t)
-		}
-		return val, err
-	}
-
-	val := reflect.New(t).Elem()
-	err := vd.DecodeValue(dc, vr, val)
-	return val, err
-}
-
-// CodecZeroer is the interface implemented by Codecs that can also determine if
-// a value of the type that would be encoded is zero.
-type CodecZeroer interface {
-	IsTypeZero(interface{}) bool
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
deleted file mode 100644
index 5a916cc..0000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"fmt"
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// ByteSliceCodec is the Codec used for []byte values.
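The named-type conversion explained in the comment inside decodeTypeOrValueWithInfo above is easy to reproduce from user code. A minimal sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// myBool mirrors the named-type example from the comment above.
type myBool bool

func main() {
	raw, _ := bson.Marshal(bson.M{"ok": true})
	var m map[string]myBool
	// The bool decoder produces a reflect.Value of type bool; the Convert
	// step shown above turns it into myBool before it is stored in the map.
	fmt.Println(bson.Unmarshal(raw, &m), m["ok"])
}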
-type ByteSliceCodec struct {
-	EncodeNilAsEmpty bool
-}
-
-var (
-	defaultByteSliceCodec = NewByteSliceCodec()
-
-	_ ValueCodec  = defaultByteSliceCodec
-	_ typeDecoder = defaultByteSliceCodec
-)
-
-// NewByteSliceCodec returns a ByteSliceCodec with options opts.
-func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
-	byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
-	codec := ByteSliceCodec{}
-	if byteSliceOpt.EncodeNilAsEmpty != nil {
-		codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoder for []byte.
-func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tByteSlice {
-		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
-	}
-	if val.IsNil() && !bsc.EncodeNilAsEmpty {
-		return vw.WriteNull()
-	}
-	return vw.WriteBinary(val.Interface().([]byte))
-}
-
-func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
-	if t != tByteSlice {
-		return emptyValue, ValueDecoderError{
-			Name:     "ByteSliceDecodeValue",
-			Types:    []reflect.Type{tByteSlice},
-			Received: reflect.Zero(t),
-		}
-	}
-
-	var data []byte
-	var err error
-	switch vrType := vr.Type(); vrType {
-	case bsontype.String:
-		str, err := vr.ReadString()
-		if err != nil {
-			return emptyValue, err
-		}
-		data = []byte(str)
-	case bsontype.Symbol:
-		sym, err := vr.ReadSymbol()
-		if err != nil {
-			return emptyValue, err
-		}
-		data = []byte(sym)
-	case bsontype.Binary:
-		var subtype byte
-		data, subtype, err = vr.ReadBinary()
-		if err != nil {
-			return emptyValue, err
-		}
-		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
-			return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"}
-		}
-	case bsontype.Null:
-		err = vr.ReadNull()
-	case bsontype.Undefined:
-		err = vr.ReadUndefined()
-	default:
-		return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType)
-	}
-	if err != nil {
-		return emptyValue, err
-	}
-
-	return reflect.ValueOf(data), nil
-}
-
-// DecodeValue is the ValueDecoder for []byte.
-func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Type() != tByteSlice {
-		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
-	}
-
-	elem, err := bsc.decodeType(dc, vr, tByteSlice)
-	if err != nil {
-		return err
-	}
-
-	val.Set(elem)
-	return nil
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
deleted file mode 100644
index cb8180f..0000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-)
-
-// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder.
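The EncodeNilAsEmpty branch in the ByteSliceCodec above is configured through bsonoptions. A sketch of the wiring, assuming the driver's public registry-builder API (bson.NewRegistryBuilder pre-registers the default codecs):

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// EncodeNilAsEmpty flips the vw.WriteNull() branch in EncodeValue above.
	codec := bsoncodec.NewByteSliceCodec(bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(true))

	rb := bson.NewRegistryBuilder()
	rb.RegisterTypeEncoder(reflect.TypeOf([]byte(nil)), codec)
	reg := rb.Build()

	raw, _ := bson.MarshalWithRegistry(reg, bson.M{"data": []byte(nil)})
	fmt.Println(bson.Raw(raw)) // data encodes as an empty binary value, not null
}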
-type condAddrEncoder struct {
-	canAddrEnc ValueEncoder
-	elseEnc    ValueEncoder
-}
-
-var _ ValueEncoder = (*condAddrEncoder)(nil)
-
-// newCondAddrEncoder returns a condAddrEncoder.
-func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder {
-	encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
-	return &encoder
-}
-
-// EncodeValue is the ValueEncoderFunc for a value that may be addressable.
-func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if val.CanAddr() {
-		return cae.canAddrEnc.EncodeValue(ec, vw, val)
-	}
-	if cae.elseEnc != nil {
-		return cae.elseEnc.EncodeValue(ec, vw, val)
-	}
-	return ErrNoEncoder{Type: val.Type()}
-}
-
-// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
-type condAddrDecoder struct {
-	canAddrDec ValueDecoder
-	elseDec    ValueDecoder
-}
-
-var _ ValueDecoder = (*condAddrDecoder)(nil)
-
-// newCondAddrDecoder returns a condAddrDecoder.
-func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder {
-	decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec}
-	return &decoder
-}
-
-// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
-func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if val.CanAddr() {
-		return cad.canAddrDec.DecodeValue(dc, vr, val)
-	}
-	if cad.elseDec != nil {
-		return cad.elseDec.DecodeValue(dc, vr, val)
-	}
-	return ErrNoDecoder{Type: val.Type()}
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
deleted file mode 100644
index e95cab5..0000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
+++ /dev/null
@@ -1,1729 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"math"
-	"net/url"
-	"reflect"
-	"strconv"
-	"time"
-
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
-)
-
-var (
-	defaultValueDecoders DefaultValueDecoders
-	errCannotTruncate    = errors.New("float64 can only be truncated to an integer type when truncation is enabled")
-)
-
-type decodeBinaryError struct {
-	subtype  byte
-	typeName string
-}
-
-func (d decodeBinaryError) Error() string {
-	return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype)
-}
-
-func newDefaultStructCodec() *StructCodec {
-	codec, err := NewStructCodec(DefaultStructTagParser)
-	if err != nil {
-		// This function is called from the codec registration path, so errors can't be propagated. If there's an error
-		// constructing the StructCodec, we panic to avoid losing it.
-		panic(fmt.Errorf("error creating default StructCodec: %v", err))
-	}
-	return codec
-}
-
-// DefaultValueDecoders is a namespace type for the default ValueDecoders used
-// when creating a registry.
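The condAddrEncoder above selects between two encoders based on addressability, which is what makes pointer-receiver marshalers behave differently depending on how a value is held. A sketch of the observable effect (the counter type is hypothetical):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// counter has a pointer-receiver MarshalBSON, so the marshaler hook can only
// apply when the value being encoded is addressable or already a pointer.
type counter struct {
	N int `bson:"n"`
}

func (c *counter) MarshalBSON() ([]byte, error) {
	return bson.Marshal(bson.D{{"n", c.N * 2}})
}

func main() {
	byPtr, _ := bson.Marshal(bson.D{{"c", &counter{N: 1}}}) // MarshalBSON runs: {"n": 2}
	byVal, _ := bson.Marshal(bson.D{{"c", counter{N: 1}}})  // value in an interface is not addressable: struct codec runs: {"n": 1}
	fmt.Println(bson.Raw(byPtr), bson.Raw(byVal))
}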
-type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. -func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). - RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). - RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). - RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). 
- RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). - RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). - RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. -func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tD { - return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - dc.Ancestor = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return err - } - tEmptyTypeDecoder, _ := decoder.(typeDecoder) - - // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. - var elems primitive.D - if !val.IsNil() { - val.SetLen(0) - elems = val.Interface().(primitive.D) - } else { - elems = make(primitive.D, 0) - } - - for { - key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } else if err != nil { - return err - } - - // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
- elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) - if err != nil { - return err - } - - elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) - } - - val.Set(reflect.ValueOf(elems)) - return nil -} - -func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.Bool { - return emptyValue, ValueDecoderError{ - Name: "BooleanDecodeValue", - Kinds: []reflect.Kind{reflect.Bool}, - Received: reflect.Zero(t), - } - } - - var b bool - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - b = (i32 != 0) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - b = (i64 != 0) - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - b = (f64 != 0) - case bsontype.Boolean: - b, err = vr.ReadBoolean() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(b), nil -} - -// BooleanDecodeValue is the ValueDecoderFunc for bool types. -func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || i64 > math.MaxInt32 { - return emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return 
reflect.ValueOf(i64), nil - case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var i64 int64 - var err error - switch vr.Type() { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") - } - if f64 > float64(math.MaxInt64) { - return fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - if b { - i64 = 1 - } - default: - return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) - } - - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - switch val.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return fmt.Errorf("%d overflows uint8", i64) - } - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return fmt.Errorf("%d overflows uint16", i64) - } - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return fmt.Errorf("%d overflows uint32", i64) - } - case reflect.Uint64: - if i64 < 0 { - return fmt.Errorf("%d overflows uint64", i64) - } - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return fmt.Errorf("%d overflows uint", i64) - } - default: - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - val.SetUint(uint64(i64)) - return nil -} - -func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var f float64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - f = float64(i32) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - f = float64(i64) - case bsontype.Double: - f, err = vr.ReadDouble() - 
if err != nil { - return emptyValue, err - } - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - f = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) - } - - switch t.Kind() { - case reflect.Float32: - if !ec.Truncate && float64(float32(f)) != f { - return emptyValue, errCannotTruncate - } - - return reflect.ValueOf(float32(f)), nil - case reflect.Float64: - return reflect.ValueOf(f), nil - default: - return emptyValue, ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: reflect.Zero(t), - } - } -} - -// FloatDecodeValue is the ValueDecoderFunc for float types. -func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: val, - } - } - - elem, err := dvd.floatDecodeType(ec, vr, val.Type()) - if err != nil { - return err - } - - val.SetFloat(elem.Float()) - return nil -} - -// StringDecodeValue is the ValueDecoderFunc for string types. -// -// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. 
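The intDecodeType and floatDecodeType hunks above reject lossy numeric conversions unless DecodeContext.Truncate is set, returning the errCannotTruncate error defined earlier in this file. A sketch of the failure mode with the default registry:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	raw, _ := bson.Marshal(bson.M{"n": 3.7})

	// Truncate is false by default, so decoding the double 3.7 into an int64
	// fails: "float64 can only be truncated to an integer type when
	// truncation is enabled".
	var asInt struct{ N int64 }
	fmt.Println(bson.Unmarshal(raw, &asInt))

	// Decoding into float64 is exact, so no truncation check applies.
	var asFloat struct{ N float64 }
	fmt.Println(bson.Unmarshal(raw, &asFloat), asFloat.N)
}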
-func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. -func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. 
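binaryDecodeType above preserves the subtype byte in a primitive.Binary, while the []byte path (and decodeBinaryError earlier in this file) only accepts the generic subtypes 0x00 and 0x02. A short sketch of both targets:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	raw, _ := bson.Marshal(bson.M{"data": primitive.Binary{Subtype: 0x00, Data: []byte("hi")}})

	// primitive.Binary keeps the subtype byte alongside the payload.
	var asBin struct{ Data primitive.Binary }
	fmt.Println(bson.Unmarshal(raw, &asBin), asBin.Data.Subtype)

	// []byte accepts subtype 0x00/0x02 only; other subtypes return decodeBinaryError.
	var asBytes struct{ Data []byte }
	fmt.Println(bson.Unmarshal(raw, &asBytes), string(asBytes.Data))
}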
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. 
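As the objectIDDecodeType hunk above shows, an ObjectID decodes from the native BSON type, from a 24-character hex string (via primitive.ObjectIDFromHex), or from a raw 12-byte string. A sketch of the hex-string path:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	oid := primitive.NewObjectID()

	// Store the ID as its 24-character hex form; the decoder above parses it
	// back with primitive.ObjectIDFromHex.
	raw, _ := bson.Marshal(bson.M{"_id": oid.Hex()})
	var out struct {
		ID primitive.ObjectID `bson:"_id"`
	}
	fmt.Println(bson.Unmarshal(raw, &out), out.ID == oid)
}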
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. -func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. -func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. 
-func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. -func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. 
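The Regex, DBPointer, and Timestamp decoders above map BSON-specific wire types onto the corresponding primitive structs. A round-trip sketch for two of them:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	doc := bson.D{
		{"re", primitive.Regex{Pattern: "^ab", Options: "i"}},
		{"ts", primitive.Timestamp{T: 1670716800, I: 1}},
	}
	raw, _ := bson.Marshal(doc)

	var out struct {
		Re primitive.Regex     `bson:"re"`
		Ts primitive.Timestamp `bson:"ts"`
	}
	fmt.Println(bson.Unmarshal(raw, &out), out.Re.Pattern, out.Ts.T)
}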
-func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. -func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. -func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. 
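decimal128DecodeType above only accepts the BSON Decimal128 wire type (plus null/undefined). A small round-trip sketch using the primitive package's parser:

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, err := primitive.ParseDecimal128("1.5")
	if err != nil {
		log.Fatal(err)
	}
	raw, _ := bson.Marshal(bson.M{"price": d})

	var out struct{ Price primitive.Decimal128 }
	fmt.Println(bson.Unmarshal(raw, &out), out.Price.String())
}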
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. -func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. 
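The json.Number and url.URL paths above are reachable directly from struct tags; a sketch with illustrative field names, assuming the stdlib encoding/json and net/url imports alongside the bson package:

    raw, _ := bson.Marshal(bson.D{
        {"count", int64(42)},
        {"ratio", 0.5},
        {"home", "https://example.com/docs"},
    })

    var out struct {
        Count json.Number `bson:"count"` // BSON int64 -> "42"
        Ratio json.Number `bson:"ratio"` // BSON double -> "0.5"
        Home  url.URL     `bson:"home"`  // BSON string, parsed by url.Parse
    }
    if err := bson.Unmarshal(raw, &out); err != nil {
        panic(err)
    }
    fmt.Println(out.Count, out.Ratio, out.Home.Host) // 42 0.5 example.com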
-func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. 
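The arithmetic in TimeDecodeValue converts a BSON DateTime, which is milliseconds since the Unix epoch, into a time.Time. The same conversion in isolation, stdlib only, with an illustrative sample value:

    dt := int64(1670739954123) // BSON DateTime: milliseconds since the Unix epoch

    t := time.Unix(dt/1000, dt%1000*1000000).UTC()
    fmt.Println(t) // 2022-12-11 06:25:54.123 +0000 UTC

dt/1000 yields whole seconds, and dt%1000*1000000 converts the leftover milliseconds to nanoseconds, matching time.Unix's (sec, nsec) signature.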
-func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if len(data) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) - } - - for idx, elem := range data { - val.Index(idx).Set(reflect.ValueOf(elem)) - } - return nil - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into an array", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if len(elems) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) - } - - for idx, elem := range elems { - val.Index(idx).Set(elem) - } - - return nil -} - -// SliceDecodeValue is the ValueDecoderFunc for slice types. -// -// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vr.Type() { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - default: - return fmt.Errorf("cannot decode %v into a slice", vr.Type()) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} - -// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
-func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) - } - return nil -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) - } - return nil -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. 
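For reference, a type only has to implement UnmarshalBSONValue to be picked up by ValueUnmarshalerDecodeValue. A minimal sketch; the Celsius type is hypothetical, and imports of fmt, strconv, and the v1 bson and bsontype packages are assumed:

    // Celsius is a hypothetical type that accepts either a BSON double
    // or a BSON string such as "21.5".
    type Celsius float64

    // UnmarshalBSONValue implements bson.ValueUnmarshaler.
    func (c *Celsius) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
        rv := bson.RawValue{Type: t, Value: data}
        switch t {
        case bsontype.Double:
            f, ok := rv.DoubleOK()
            if !ok {
                return fmt.Errorf("malformed BSON double")
            }
            *c = Celsius(f)
        case bsontype.String:
            s, ok := rv.StringValueOK()
            if !ok {
                return fmt.Errorf("malformed BSON string")
            }
            f, err := strconv.ParseFloat(s, 64)
            if err != nil {
                return err
            }
            *c = Celsius(f)
        default:
            return fmt.Errorf("cannot decode %v into a Celsius", t)
        }
        return nil
    }

Because the receiver is a pointer, the decoder takes val.Addr() before calling the method, which is why the CanAddr checks above exist.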
-func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return 
reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return nil, err - } - - val := reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 6bdb43c..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,766 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a namespace type for the default ValueEncoders used -// when creating a registry. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. 
-func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). - RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). - RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). - RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. 
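The registration chain above is roughly what bson.NewRegistryBuilder performs internally; building a usable registry by hand looks like this, as a sketch assuming the v1 bsoncodec and bson packages:

    rb := bsoncodec.NewRegistryBuilder()
    bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
    bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
    reg := rb.Build()

    // The registry can then be passed to the *WithRegistry helpers.
    raw, err := bson.MarshalWithRegistry(reg, bson.D{{"n", int64(1)}})
    if err != nil {
        panic(err)
    }
    _ = raw

bson.NewRegistryBuilder additionally registers the primitive codecs from the bson package itself, so it is usually the better entry point.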
-func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. -func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. 
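The effect of fitsIn32Bits is observable in marshaled output: a Go int that fits in 32 bits becomes a BSON int32, while an int64 stays 64-bit unless MinSize is set. A sketch with illustrative field names:

    raw, _ := bson.Marshal(bson.D{{"small", 42}, {"big", int64(1) << 40}})

    var doc bson.Raw = raw
    fmt.Println(doc.Lookup("small").Type) // 32-bit integer
    fmt.Println(doc.Lookup("big").Type)   // 64-bit integer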
-func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tDecimal {
-		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
-	}
-	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
-}
-
-// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
-func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tJSONNumber {
-		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
-	}
-	jsnum := val.Interface().(json.Number)
-
-	// Attempt int first, then float64
-	if i64, err := jsnum.Int64(); err == nil {
-		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
-	}
-
-	f64, err := jsnum.Float64()
-	if err != nil {
-		return err
-	}
-
-	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
-}
-
-// URLEncodeValue is the ValueEncoderFunc for url.URL.
-func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tURL {
-		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
-	}
-	u := val.Interface().(url.URL)
-	return vw.WriteString(u.String())
-}
-
-// TimeEncodeValue is the ValueEncoderFunc for time.Time.
-//
-// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tTime {
-		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
-	}
-	tt := val.Interface().(time.Time)
-	dt := primitive.NewDateTimeFromTime(tt)
-	return vw.WriteDateTime(int64(dt))
-}
-
-// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
-//
-// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tByteSlice {
-		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
-	}
-	if val.IsNil() {
-		return vw.WriteNull()
-	}
-	return vw.WriteBinary(val.Interface().([]byte))
-}
-
-// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
-//
-// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
-		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
-	}
-
-	if val.IsNil() {
-		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
-		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
-		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
-		// so just continue. The operations on a map reflection value are valid, so we can call
-		// MapKeys within mapEncodeValue without a problem.
- err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return dve.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. -func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. 
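The [N]byte special case in ArrayEncodeValue above means fixed-size byte arrays marshal as BSON binary rather than as arrays of numbers; a sketch:

    raw, _ := bson.Marshal(bson.D{{"sum", [4]byte{0xde, 0xad, 0xbe, 0xef}}})

    var doc bson.Raw = raw
    fmt.Println(doc.Lookup("sum").Type) // binary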
-func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. 
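SliceEncodeValue's primitive.D special case is why bson.D values appear as embedded documents while ordinary slices appear as arrays; the SliceCodec registered by default behaves the same way. A sketch:

    raw, _ := bson.Marshal(bson.D{
        {"meta", bson.D{{"k", "v"}}},        // written as an embedded document
        {"tags", []string{"alpha", "beta"}}, // written as an array
    })

    var doc bson.Raw = raw
    fmt.Println(doc.Lookup("meta").Type) // embedded document
    fmt.Println(doc.Lookup("tags").Type) // array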
-func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) - } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) - } - data := returns[0].Interface().([]byte) - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. -func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) - } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) - } - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, data.Elem()) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. 
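Implementing the hook that ValueMarshalerEncodeValue dispatches to takes a single method. A minimal sketch; Celsius is hypothetical, and bson.MarshalValue is the v1 helper returning a (bsontype.Type, []byte, error) triple:

    // Celsius is a hypothetical type stored as a BSON double.
    type Celsius float64

    // MarshalBSONValue implements bson.ValueMarshaler.
    func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
        return bson.MarshalValue(float64(c))
    }

With a value receiver, *Celsius implements the interface as well, and the isImplementationNil guard used above writes null for a nil *Celsius instead of calling the method.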
-func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. -func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. 
-func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 5f903eb..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

-// Package bsoncodec provides a system for encoding values to BSON representations and decoding
-// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
-// BSON representations. The types in this package enable a flexible system for handling this
-// encoding and decoding.
-//
-// The codec system is composed of two parts:
-//
-// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
-// representations.
-//
-// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
-// retrieving them.
-//
-// # ValueEncoders and ValueDecoders
-//
-// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
-// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
-// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
-// is provided to allow use of a function with the correct signature as a ValueEncoder. An
-// EncodeContext instance is provided to allow implementations to look up further ValueEncoders and
-// to provide configuration information.
-//
-// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
-// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
-// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
-// instance is provided and serves similar functionality to the EncodeContext.
-//
-// # Registry and RegistryBuilder
-//
-// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
-// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a
-// RegistryBuilder, which handles four main types of registrations:
-//
-// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods.
-// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly.
-// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the
-// interface, but not for values with concrete types that implement the interface.
-//
-// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods.
-// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values
-// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will
-// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete
-// type.
-//
-// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when
-// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64
-// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would
-// change the behavior so these values decode as Go int instances instead:
-//
-//	intType := reflect.TypeOf(int(0))
-//	registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
-//
-// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder
-// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the
-// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first.
-// These methods should be used to change the behavior for all values of a specific kind.
-//
-// # Registry Lookup Procedure
-//
-// When looking up an encoder in a Registry, the precedence rules are as follows:
-//
-// 1. A type encoder registered for the exact type of the value.
-//
-// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the
-// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first
-// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined
-// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those
-// will take precedence over any new hooks.
-//
-// 3. A kind encoder registered for the value's kind.
-//
-// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence
-// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is
-// found.
-//
-// # DefaultValueEncoders and DefaultValueDecoders
-//
-// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
-// ValueDecoders for handling a wide range of Go types, including all of the types within the
-// primitive package. To make registering these codecs easier, a helper method on each type is
-// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
-// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; this method also
-// handles registering type map entries for each BSON type.
-package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
deleted file mode 100644
index eda417c..0000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-)
-
-// EmptyInterfaceCodec is the Codec used for interface{} values.
-type EmptyInterfaceCodec struct {
-	DecodeBinaryAsSlice bool
-}
-
-var (
-	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
-
-	_ ValueCodec  = defaultEmptyInterfaceCodec
-	_ typeDecoder = defaultEmptyInterfaceCodec
-)
-
-// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
-func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
-	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
- - codec := EmptyInterfaceCodec{} - if interfaceOpt.DecodeBinaryAsSlice != nil { - codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice - } - return &codec -} - -// EncodeValue is the ValueEncoderFunc for interface{}. -func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { - isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument { - if dc.defaultDocumentType != nil { - // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return - // that type. - return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. - var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if eic.DecodeBinaryAsSlice && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. 
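DecodeBinaryAsSlice is typically switched on through bsonoptions; a sketch of the wiring, assuming the v1 bsonoptions API and that interface{} is the registration key:

    opts := bsonoptions.EmptyInterfaceCodec().SetDecodeBinaryAsSlice(true)

    rb := bson.NewRegistryBuilder()
    rb.RegisterTypeDecoder(
        reflect.TypeOf((*interface{})(nil)).Elem(), // the interface{} type
        bsoncodec.NewEmptyInterfaceCodec(opts),
    )
    reg := rb.Build()

    raw, _ := bson.Marshal(bson.D{{"data", primitive.Binary{Subtype: 0x00, Data: []byte{1, 2, 3}}}})

    var out bson.M
    if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
        panic(err)
    }
    fmt.Printf("%T\n", out["data"]) // []uint8 rather than primitive.Binary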
-func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index e1fbef9..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool - EncodeKeysWithStringer bool -} - -var _ ValueCodec = &MapCodec{} - -// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. -// This applies to types used as map keys and is similar to encoding.TextMarshaler. -type KeyMarshaler interface { - MarshalKey() (key string, err error) -} - -// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation -// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. -// -// UnmarshalKey must be able to decode the form generated by MarshalKey. -// UnmarshalKey must copy the text if it wishes to retain the text -// after returning. -type KeyUnmarshaler interface { - UnmarshalKey(key string) error -} - -// NewMapCodec returns a MapCodec with options opts. -func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { - mapOpt := bsonoptions.MergeMapCodecOptions(opts...) - - codec := MapCodec{} - if mapOpt.DecodeZerosMap != nil { - codec.DecodeZerosMap = *mapOpt.DecodeZerosMap - } - if mapOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty - } - if mapOpt.EncodeKeysWithStringer != nil { - codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer - } - return &codec -} - -// EncodeValue is the ValueEncoder for map[*]* types. -func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() && !mc.EncodeNilAsEmpty { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. 
- err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return mc.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. -func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - keyStr, err := mc.encodeKey(key) - if err != nil { - return err - } - - if collisionFn != nil && collisionFn(keyStr) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(keyStr) - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeValue is the ValueDecoder for map[string/decimal]* types. -func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - if val.Len() > 0 && mc.DecodeZerosMap { - clearMap(val) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - k, err := mc.decodeKey(key, keyType) - if err != nil { - return err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return newDecodeError(key, err) - } - - val.SetMapIndex(k, elem) - } - return nil -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { - return fmt.Sprint(val), nil - } - - // keys of any string type are used directly - if val.Kind() == reflect.String { - return val.String(), nil - } - // KeyMarshalers are marshaled - if km, ok := val.Interface().(KeyMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - buf, err := km.MarshalKey() - if err == nil { - return buf, nil - } 
- return "", err - } - // keys implementing encoding.TextMarshaler are marshaled. - if km, ok := val.Interface().(encoding.TextMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - - buf, err := km.MarshalText() - if err != nil { - return "", err - } - - return string(buf), nil - } - - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(val.Uint(), 10), nil - } - return "", fmt.Errorf("unsupported key type: %v", val.Type()) -} - -var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - -func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { - keyVal := reflect.ValueOf(key) - var err error - switch { - // First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler. - case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(KeyUnmarshaler) - err = v.UnmarshalKey(key) - keyVal = keyVal.Elem() - // Try to decode encoding.TextUnmarshalers. - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(encoding.TextUnmarshaler) - err = v.UnmarshalText([]byte(key)) - keyVal = keyVal.Elem() - // Otherwise, fall back to type-specific behavior. - default: - switch keyType.Kind() { - case reflect.String: - keyVal = reflect.ValueOf(key).Convert(keyType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, parseErr := strconv.ParseInt(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, parseErr := strconv.ParseUint(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - break - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Float32, reflect.Float64: - if mc.EncodeKeysWithStringer { - parsed, err := strconv.ParseFloat(key, 64) - if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) - } - keyVal = reflect.ValueOf(parsed) - break - } - fallthrough - default: - return keyVal, fmt.Errorf("unsupported key type: %v", keyType) - } - } - return keyVal, err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go deleted file mode 100644 index fbd9f0a..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License.
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import "fmt" - -type mode int - -const ( - _ mode = iota - mTopLevel - mDocument - mArray - mValue - mElement - mCodeWithScope - mSpacer -) - -func (m mode) String() string { - var str string - - switch m { - case mTopLevel: - str = "TopLevel" - case mDocument: - str = "DocumentMode" - case mArray: - str = "ArrayMode" - case mValue: - str = "ValueMode" - case mElement: - str = "ElementMode" - case mCodeWithScope: - str = "CodeWithScopeMode" - case mSpacer: - str = "CodeWithScopeSpacerFrame" - default: - str = "UnknownMode" - } - - return str -} - -// TransitionError is an error returned when an invalid transition of a -// ValueReader or ValueWriter state machine occurs. -type TransitionError struct { - parent mode - current mode - destination mode -} - -func (te TransitionError) Error() string { - if te.destination == mode(0) { - return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) - } - if te.parent == mode(0) { - return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) - } - return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go deleted file mode 100644 index 616a3e7..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var _ ValueEncoder = &PointerCodec{} -var _ ValueDecoder = &PointerCodec{} - -// PointerCodec is the Codec used for pointers. -type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex -} - -// NewPointerCodec returns a PointerCodec that has been initialized. -func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } -} - -// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil -// or looking up an encoder for the type of value the pointer points to. -func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.Ptr { - if !val.IsValid() { - return vw.WriteNull() - } - return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} - } - return enc.EncodeValue(ec, vw, val.Elem()) - } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() - if err != nil { - return err - } - - return enc.EncodeValue(ec, vw, val.Elem()) -} - -// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and -// using that to decode.
If the BSON value is Null, this method will set the pointer to nil. -func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Ptr { - return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - } - - if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) - } - - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} - } - return dec.DecodeValue(dc, vr, val.Elem()) - } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() - if err != nil { - return err - } - - return dec.DecodeValue(dc, vr, val.Elem()) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go deleted file mode 100644 index 4cf2b01..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types -// that implement this interface will have ProxyBSON called during the encoding process and that -// value will be encoded in place for the implementer. -type Proxy interface { - ProxyBSON() (interface{}, error) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go deleted file mode 100644 index 8064402..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. -var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>") - -// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. -var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") - -// ErrNoEncoder is returned when there wasn't an encoder available for a type. -type ErrNoEncoder struct { - Type reflect.Type -} - -func (ene ErrNoEncoder) Error() string { - if ene.Type == nil { - return "no encoder found for <nil>" - } - return "no encoder found for " + ene.Type.String() -} - -// ErrNoDecoder is returned when there wasn't a decoder available for a type. -type ErrNoDecoder struct { - Type reflect.Type -} - -func (end ErrNoDecoder) Error() string { - return "no decoder found for " + end.Type.String() -} - -// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
-type ErrNoTypeMapEntry struct { - Type bsontype.Type -} - -func (entme ErrNoTypeMapEntry) Error() string { - return "no type map entry found for " + entme.Type.String() -} - -// ErrNotInterface is returned when the provided type is not an interface. -var ErrNotInterface = errors.New("The provided type is not an interface") - -// A RegistryBuilder is used to build a Registry. This type is not goroutine -// safe. -type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// type passed around, and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex -} - -// NewRegistryBuilder creates a new empty RegistryBuilder. -func NewRegistryBuilder() *RegistryBuilder { - return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), - } -} - -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - -// RegisterCodec will register the provided ValueCodec for the provided type. -func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { - rb.RegisterTypeEncoder(t, codec) - rb.RegisterTypeDecoder(t, codec) - return rb -} - -// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. -// -// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. -func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc - return rb -} - -// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
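-// -// For example (an illustrative sketch; myEncoder stands in for any ValueEncoder): -// -// tM := reflect.TypeOf((*Marshaler)(nil)).Elem() -// rb.RegisterHookEncoder(tM, myEncoder)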
-func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) - return rb -} - -// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. -// -// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. -func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec - return rb -} - -// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) - return rb -} - -// RegisterEncoder registers the provided type and encoder pair. -// -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. -func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t == tEmpty { - rb.typeEncoders[t] = enc - return rb - } - switch t.Kind() { - case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) - default: - rb.typeEncoders[t] = enc - } - return rb -} - -// RegisterDecoder registers the provided type and decoder pair. -// -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. -func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t == nil { - rb.typeDecoders[nil] = dec - return rb - } - if t == tEmpty { - rb.typeDecoders[t] = dec - return rb - } - switch t.Kind() { - case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) - default: - rb.typeDecoders[t] = dec - } - return rb -} - -// RegisterDefaultEncoder will register the provided ValueEncoder to the provided -// kind.
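-// -// For example (sketch; sliceEnc stands in for any ValueEncoder), to handle every slice kind with a single encoder: -// -// rb.RegisterDefaultEncoder(reflect.Slice, sliceEnc)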
-func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc - return rb -} - -// RegisterDefaultDecoder will register the provided ValueDecoder to the -// provided kind. -func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec - return rb -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt - return rb -} - -// Build creates a Registry from the current state of this RegistryBuilder. -func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { - registry.typeEncoders[t] = enc - } - - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { - registry.typeDecoders[t] = dec - } - - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) - - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) - - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { - registry.kindEncoders[kind] = enc - } - - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { - registry.kindDecoders[kind] = dec - } - - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { - registry.typeMap[bt] = rt - } - - return registry -} - -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: -// -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. -// -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. -// -// 3. An encoder registered for the reflect.Kind of the value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. 
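-// -// A lookup might be used as follows (an illustrative sketch; reg is a *Registry and vw a bsonrw.ValueWriter assumed to exist): -// -// enc, err := reg.LookupEncoder(reflect.TypeOf(int64(0))) -// if err == nil { -// err = enc.EncodeValue(EncodeContext{Registry: reg}, vw, reflect.ValueOf(int64(42))) -// }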
-func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} - r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) - r.mu.RUnlock() - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: t} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(t, true) - if found { - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil - } - - if t == nil { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr - } - - enc, found = r.kindEncoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr - } - - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil -} - -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] - return enc, found -} - -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) - if !found { - defaultEnc = r.kindEncoders[t.Kind()] - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder inspects the registry for a decoder for the given type. The lookup precedence works as follows: -// -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. -// -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. -// -// 3. A decoder registered for the reflect.Kind of the value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned.
-func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { - return nil, ErrNilType - } - decodererr := ErrNoDecoder{Type: t} - r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) - r.mu.RUnlock() - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: t} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(t, true) - if found { - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil - } - - dec, found = r.kindDecoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[t] = nil - r.mu.Unlock() - return nil, decodererr - } - - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil -} - -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] - return dec, found -} - -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) - if !found { - defaultDec = r.kindDecoders[t.Kind()] - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. -func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return t, nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index 3c1b6b8..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -type SliceCodec struct { - EncodeNilAsEmpty bool -} - -var _ ValueCodec = &SliceCodec{} - -// NewSliceCodec returns a SliceCodec with options opts. -func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { - sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) - - codec := SliceCodec{} - if sliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for slice types.
-func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() && !sc.EncodeNilAsEmpty { - return vw.WriteNull() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if lookupErr == errInvalidValue { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// DecodeValue is the ValueDecoder for slice types. 
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index 5332b7c..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for string values. -type StringCodec struct { - DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - _ ValueCodec = defaultStringCodec - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts.
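-// -// For example (sketch), to decode BSON ObjectIDs into their raw 12 bytes rather than their hex form: -// -// codec := NewStringCodec(bsonoptions.StringCodec().SetDecodeObjectIDAsHex(false))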
-func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. -func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - elem, err := sc.decodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go deleted file mode 100644 index be3f208..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ /dev/null @@ -1,664 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. 
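-// -// Callers can unwrap a DecodeError to recover the offending key path (an illustrative sketch; err comes from an earlier decode call): -// -// var de *DecodeError -// if errors.As(err, &de) { -// fmt.Println(de.Keys()) // e.g. [a b c] -// }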
-type DecodeError struct { - keys []string - wrapped error -} - -// Unwrap returns the underlying error. -func (de *DecodeError) Unwrap() error { - return de.wrapped -} - -// Error implements the error interface. -func (de *DecodeError) Error() string { - // The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the - // stack of BSON keys, so we call de.Keys(), which reverses them. - keyPath := strings.Join(de.Keys(), ".") - return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) -} - -// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down -// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be -// a string, the keys slice will be ["a", "b", "c"]. -func (de *DecodeError) Keys() []string { - reversedKeys := make([]string, 0, len(de.keys)) - for idx := len(de.keys) - 1; idx >= 0; idx-- { - reversedKeys = append(reversedKeys, de.keys[idx]) - } - - return reversedKeys -} - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// StructCodec is the Codec used for struct values. -type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) - - codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types.
-func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(r.Registry, val.Type()) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) - - if err != nil && err != errInvalidValue { - return err - } - - if err == errInvalidValue { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var isZero bool - rvInterface := rv.Interface() - if cz, ok := encoder.(CodecZeroer); ok { - isZero = cz.IsTypeZero(rvInterface) - } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() - } else { - isZero = sc.isZero(rvInterface) - } - if desc.omitEmpty && isZero { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
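-// -// For example (sketch): decoding the document {"m": {"b": 2}} into a struct whose field -// -// M map[string]int -// -// already holds map[string]int{"a": 1} leaves M as {"a": 1, "b": 2}: the existing pair is kept and the new one is merged in.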
-func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(r.Registry, val.Type()) - if err != nil { - return err - } - - if sc.DecodeZeroStruct { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. - err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. 
- innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func (sc *StructCodec) isZero(i interface{}) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true - } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: - if sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } - } - return true - } - } - - return false -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. 
- sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil - } - - numFields := t.NumField() - sd := &structDescription{ - fm: make(map[string]fieldDescription, numFields), - fl: make([]fieldDescription, 0, numFields), - inlineMap: -1, - } - - var fields []fieldDescription - for i := 0; i < numFields; i++ { - sf := t.Field(i) - if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { - // the field is private, or unexported fields aren't allowed; ignore it - continue - } - - sfType := sf.Type - encoder, err := r.LookupEncoder(sfType) - if err != nil { - encoder = nil - } - decoder, err := r.LookupDecoder(sfType) - if err != nil { - decoder = nil - } - - description := fieldDescription{ - fieldName: sf.Name, - idx: i, - encoder: encoder, - decoder: decoder, - } - - stags, err := sc.parser.ParseStructTags(sf) - if err != nil { - return nil, err - } - if stags.Skip { - continue - } - description.name = stags.Name - description.omitEmpty = stags.OmitEmpty - description.minSize = stags.MinSize - description.truncate = stags.Truncate - - if stags.Inline { - sd.inline = true - switch sfType.Kind() { - case reflect.Map: - if sd.inlineMap >= 0 { - return nil, errors.New("(struct " + t.String() + ") multiple inline maps") - } - if sfType.Key() != tString { - return nil, errors.New("(struct " + t.String() + ") inline map must have string keys") - } - sd.inlineMap = description.idx - case reflect.Ptr: - sfType = sfType.Elem() - if sfType.Kind() != reflect.Struct { - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - fallthrough - case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) - if err != nil { - return nil, err - } - for _, fd := range inlinesf.fl { - if fd.inline == nil { - fd.inline = []int{i, fd.idx} - } else { - fd.inline = append([]int{i}, fd.inline...) - } - fields = append(fields, fd) - - } - default: - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - continue - } - fields = append(fields, description) - } - - // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name - sort.Slice(fields, func(i, j int) bool { - x := fields - // sort field by name, breaking ties with depth, then - // breaking ties with index sequence. - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].inline) != len(x[j].inline) { - return len(x[i].inline) < len(x[j].inline) - } - return byIndex(x).Less(i, j) - }) - - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules.
If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. We need only check - // for error cases: two fields at top level. - if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } - } - } - } - } - - return -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } - - return result -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 62708c5..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "strings" -) - -// StructTagParser returns the struct tags for a given struct field. -type StructTagParser interface { - ParseStructTags(reflect.StructField) (StructTags, error) -} - -// StructTagParserFunc is an adapter that allows a generic function to be used -// as a StructTagParser. -type StructTagParserFunc func(reflect.StructField) (StructTags, error) - -// ParseStructTags implements the StructTagParser interface. 
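-// -// This lets an ordinary function act as the parser (an illustrative sketch; the uppercasing rule is made up for the example): -// -// var upper StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { -// return StructTags{Name: strings.ToUpper(sf.Name)}, nil -// } -// codec, err := NewStructCodec(upper)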
-func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { - return stpf(sf) -} - -// StructTags represents the struct tag fields that the StructCodec uses during -// the encoding and decoding process. -// -// In the case of a struct, the lowercased field name is used as the key for each exported -// field but this behavior may be changed using a struct tag. The tag may also contain flags to -// adjust the marshalling behavior for the field. -// -// The properties are defined below: -// -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. -// -// MinSize Marshal an integer of a type larger than 32 bits as an int32, if that's -// feasible while preserving the numeric value. -// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. -// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. -// -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. -// -// TODO(skriptble): Add tags for undefined as nil and for null as nil. -type StructTags struct { - Name string - OmitEmpty bool - MinSize bool - Truncate bool - Inline bool - Skip bool -} - -// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. -// It will handle the bson struct tag. See the documentation for StructTags to see -// what each of the returned fields means. -// -// If there is no name in the struct tag fields, the struct field name is lowercased. -// The tag formats accepted are: -// -// "[<key>][,<flag1>[,<flag2>]]" -// -// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)` -// -// An example: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -// A struct tag either consisting entirely of '-' or with a bson key with a -// value consisting entirely of '-' will return a StructTags with Skip true and -// the remaining fields will be their default values. -var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - return parseTags(key, tag) -} - -func parseTags(key string, tag string) (StructTags, error) { - var st StructTags - if tag == "-" { - st.Skip = true - return st, nil - } - - for idx, str := range strings.Split(tag, ",") { - if idx == 0 && str != "" { - key = str - } - switch str { - case "omitempty": - st.OmitEmpty = true - case "minsize": - st.MinSize = true - case "truncate": - st.Truncate = true - case "inline": - st.Inline = true - } - } - - st.Name = key - - return st, nil -} - -// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser -// but will also fall back to parsing the json tag on a field where the -// bson tag isn't available.
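-// -// For example (sketch), with this parser the field below is keyed as "jsonkey" even though it has no bson tag: -// -// type T struct { -// D string `json:"jsonkey"` -// }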
-var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok { - tag, ok = sf.Tag.Lookup("json") - } - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - - return parseTags(key, tag) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go deleted file mode 100644 index ec7e30f..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -const ( - timeFormatString = "2006-01-02T15:04:05.999Z07:00" -) - -// TimeCodec is the Codec used for time.Time values. -type TimeCodec struct { - UseLocalTimeZone bool -} - -var ( - defaultTimeCodec = NewTimeCodec() - - _ ValueCodec = defaultTimeCodec - _ typeDecoder = defaultTimeCodec -) - -// NewTimeCodec returns a TimeCodec with options opts. -func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { - timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) - - codec := TimeCodec{} - if timeOpt.UseLocalTimeZone != nil { - codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone - } - return &codec -} - -func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tTime { - return emptyValue, ValueDecoderError{ - Name: "TimeDecodeValue", - Types: []reflect.Type{tTime}, - Received: reflect.Zero(t), - } - } - - var timeVal time.Time - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(dt/1000, dt%1000*1000000) - case bsontype.String: - // assume strings are in the isoTimeFormat - timeStr, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - timeVal, err = time.Parse(timeFormatString, timeStr) - if err != nil { - return emptyValue, err - } - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(i64/1000, i64%1000*1000000) - case bsontype.Timestamp: - t, _, err := vr.ReadTimestamp() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(int64(t), 0) - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) - } - - if !tc.UseLocalTimeZone { - timeVal = timeVal.UTC() - } - return reflect.ValueOf(timeVal), nil -} - -// DecodeValue is the ValueDecoderFunc for time.Time. 
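(The DecodeValue wrapper continues directly below.) To see the UseLocalTimeZone behavior above in action, here is a hedged sketch that wires a TimeCodec into a registry, assuming the mongo-driver v1 public API; the struct field names are illustrative:

    package main

    import (
    	"fmt"
    	"reflect"
    	"time"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	// By default decoded time.Time values are normalized to UTC; opting
    	// in to UseLocalTimeZone keeps the local zone produced by time.Unix.
    	opt := bsonoptions.TimeCodec().SetUseLocalTimeZone(true)
    	tc := bsoncodec.NewTimeCodec(opt)

    	rb := bson.NewRegistryBuilder()
    	tTime := reflect.TypeOf(time.Time{})
    	rb.RegisterTypeDecoder(tTime, tc)
    	rb.RegisterTypeEncoder(tTime, tc)
    	reg := rb.Build()

    	doc, _ := bson.Marshal(bson.M{"ts": time.Now()})

    	var out struct {
    		TS time.Time `bson:"ts"`
    	}
    	if err := bson.UnmarshalWithRegistry(reg, doc, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.TS.Location()) // Local rather than UTC
    }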
-func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - elem, err := tc.decodeType(dc, vr, tTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// EncodeValue is the ValueEncoderFunc for time.Time. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go deleted file mode 100644 index 07f4b70..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "net/url" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var tBool = reflect.TypeOf(false) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tString = reflect.TypeOf("") -var tTime = reflect.TypeOf(time.Time{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tByteSlice = reflect.TypeOf([]byte(nil)) -var tByte = reflect.TypeOf(byte(0x00)) -var tURL = reflect.TypeOf(url.URL{}) -var tJSONNumber = reflect.TypeOf(json.Number("")) - -var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() -var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() -var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() -var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) -var tD = reflect.TypeOf(primitive.D{}) -var tA = reflect.TypeOf(primitive.A{}) -var tE = reflect.TypeOf(primitive.E{}) - -var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) -var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go deleted file mode 100644 index 0b21ce9..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++
/dev/null @@ -1,173 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "math" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// UIntCodec is the Codec used for uint values. -type UIntCodec struct { - EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - _ ValueCodec = defaultUIntCodec - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. -func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } - - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows 
uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder for uint types. -func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index b1256a4..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. -func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { - bs := ByteSliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return bs -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go deleted file mode 100644 index c40973c..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonoptions defines the optional configurations for the BSON codecs. 
-package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go deleted file mode 100644 index 6caaa00..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. -type EmptyInterfaceCodecOptions struct { - DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false. -} - -// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions -func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { - return &EmptyInterfaceCodecOptions{} -} - -// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false. -func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { - e.DecodeBinaryAsSlice = &b - return e -} - -// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. -func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index 7a6a880..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. -type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. - // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false.
- EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. -func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index ef965e4..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. 
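(MergeSliceCodecOptions continues directly below.) The EncodeKeysWithStringer option described above changes how map keys are rendered during encoding; a hedged sketch of the visible effect, assuming the mongo-driver v1 API and that key formatting follows the documented fmt.Sprint rules:

    package main

    import (
    	"fmt"
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	// With EncodeKeysWithStringer, keys go through fmt.Sprint, which also
    	// permits float keys and lets Stringer take precedence over
    	// TextMarshaler/TextUnmarshaler.
    	opt := bsonoptions.MapCodec().SetEncodeKeysWithStringer(true)
    	codec := bsoncodec.NewMapCodec(opt)

    	rb := bson.NewRegistryBuilder()
    	rb.RegisterDefaultEncoder(reflect.Map, codec)
    	reg := rb.Build()

    	doc, err := bson.MarshalWithRegistry(reg, map[int]string{1: "one", 2: "two"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(bson.Raw(doc)) // keys rendered as "1" and "2"
    }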
-func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index 65964f4..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. -func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 78d1dd8..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. - OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. 
-} - -// StructCodec creates a new *StructCodecOptions -func StructCodec() *StructCodecOptions { - return &StructCodecOptions{} -} - -// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. -func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { - t.DecodeZeroStruct = &b - return t -} - -// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. -func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { - t.DecodeDeepZeroInline = &b - return t -} - -// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all -// its values set to their default value. Defaults to false. -func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { - t.EncodeOmitDefaultStruct = &b - return t -} - -// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the -// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when -// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if -// there are duplicate keys after the struct is inlined. Defaults to true. -func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { - t.OverwriteDuplicatedInlinedFields = &b - return t -} - -// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. -func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { - t.AllowUnexportedFields = &b - return t -} - -// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. -func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 13496d1..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. 
-} - -// TimeCodec creates a new *TimeCodecOptions -func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index e08b7f1..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to minimum size bsontype. Defaults to false. -} - -// UIntCodec creates a new *UIntCodecOptions -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to minimum size bsontype. Defaults to false. -func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 5cdf646..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. -type Copier struct{} - -// NewCopier creates a new Copier. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst.
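(The CopyDocument implementation follows.) For the EncodeToMinSize option above, here is a sketch of the observable effect on a small uint, assuming the mongo-driver v1 public API:

    package main

    import (
    	"fmt"
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	// EncodeToMinSize writes uint values that fit in 32 bits as BSON
    	// int32 instead of int64 (uint64 is always exempt).
    	opt := bsonoptions.UIntCodec().SetEncodeToMinSize(true)
    	codec := bsoncodec.NewUIntCodec(opt)

    	rb := bson.NewRegistryBuilder()
    	rb.RegisterDefaultEncoder(reflect.Uint, codec)
    	reg := rb.Build()

    	doc, err := bson.MarshalWithRegistry(reg, bson.M{"n": uint(7)})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(bson.Raw(doc).Lookup("n").Type) // 32-bit integer, not int64
    }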
-func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. -func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. - length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. 
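(AppendDocumentBytes continues directly below.) These byte-oriented helpers make the Copier useful for transcoding between BSON representations; a sketch that copies raw document bytes into an extended JSON writer, assuming the mongo-driver v1 bsonrw API:

    package main

    import (
    	"bytes"
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	raw, _ := bson.Marshal(bson.M{"hello": "world", "n": int32(7)})

    	var buf bytes.Buffer
    	vw, err := bsonrw.NewExtJSONValueWriter(&buf, true /* canonical */, false /* escapeHTML */)
    	if err != nil {
    		panic(err)
    	}

    	// The Copier bridges the raw document bytes and the JSON writer.
    	if err := bsonrw.NewCopier().CopyDocumentFromBytes(vw, raw); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.String()) // roughly {"hello":"world","n":{"$numberInt":"7"}}
    }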
-func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. -func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represented by t and src to dst. -func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst.
-func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if err == ErrEOA { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - 
return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if err == ErrEOD { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON-like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index 54c76bf..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to begin -// parsing from the first character of the given JSON input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point.
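(The newExtJSONParser implementation follows.) The state machine below ultimately backs the public extended JSON entry points; the simplest way to drive it is bson.UnmarshalExtJSON, sketched here against the mongo-driver v1 API:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    )

    func main() {
    	// Canonical extended JSON: type wrappers like $numberLong are
    	// recognized by the parser defined below.
    	in := []byte(`{"count": {"$numberLong": "42"}, "name": "x"}`)

    	var doc bson.D
    	if err := bson.UnmarshalExtJSON(in, true /* canonical */, &doc); err != nil {
    		panic(err)
    	}
    	fmt.Println(doc) // [{count 42} {name x}]
    }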
-func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType 
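(readValue continues directly below.) One notable path in it is the relaxed $uuid handling, which validates the hyphenated RFC 4122 shape and normalizes the value into binary subtype 4. A sketch of the observable result, assuming the mongo-driver v1 API; the UUID string is an arbitrary example:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/primitive"
    )

    func main() {
    	in := []byte(`{"id": {"$uuid": "c8edabc3-f738-4ca3-b68d-ab92a91478a3"}}`)

    	var out struct {
    		ID primitive.Binary `bson:"id"`
    	}
    	if err := bson.UnmarshalExtJSON(in, false, &out); err != nil {
    		panic(err)
    	}
    	// readValue turned the hyphenated UUID into 16 raw bytes with
    	// binary subtype 4, per the shape checks below.
    	fmt.Println(out.ID.Subtype, len(out.ID.Data)) // 4 16
    }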
-func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.Replace(uuid, "-", "", -1) - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != 
jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err = nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: 
true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 35832d7..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
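
The parser deleted above drives its state machine from a per-state table of permitted token types (jpsValidTransitionTokens), with a few context-sensitive cases layered on in validateToken; any token not listed for the current state is rejected before a transition is committed. The following minimal, self-contained sketch illustrates the same table-driven validation technique under simplified assumptions; the state and token names are illustrative stand-ins, not the driver's identifiers.

package main

import "fmt"

type parseState int
type tokenType int

const (
	stateStart parseState = iota
	stateSawKey
	stateSawColon
)

const (
	tokString tokenType = iota
	tokColon
	tokNumber
)

// validNext maps each state to the set of token types allowed next;
// a missing entry means the transition is invalid.
var validNext = map[parseState]map[tokenType]bool{
	stateStart:    {tokString: true},
	stateSawKey:   {tokColon: true},
	stateSawColon: {tokString: true, tokNumber: true},
}

// validateToken reports whether token t may follow state s. Indexing a
// missing inner map yields the zero value false, which is the behavior
// we want for unknown transitions.
func validateToken(s parseState, t tokenType) bool {
	return validNext[s][t]
}

func main() {
	fmt.Println(validateToken(stateSawKey, tokColon))  // true: a colon must follow a key
	fmt.Println(validateToken(stateSawKey, tokNumber)) // false: rejected before transition
}

The appeal of the table is that the legality check is data, not control flow: only genuinely context-dependent rules (such as the comma handling inside documents versus arrays) need imperative code.
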
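
The reader pool implemented just below follows the common Get/reset/Put discipline around sync.Pool: Get type-asserts a pooled value and re-initializes it against a new source, and Put clears state before returning the value to the pool. Here is a minimal sketch of that pattern with a stand-in reader type; pooledReader and its reset method are illustrative, not the driver's implementation.

package main

import (
	"fmt"
	"sync"
)

type pooledReader struct {
	src string
}

// reset re-points the reader at a new source so a recycled
// instance behaves like a fresh one.
func (r *pooledReader) reset(src string) { r.src = src }

var readerPool = sync.Pool{
	New: func() interface{} { return new(pooledReader) },
}

func main() {
	// Get either reuses a previously pooled reader or allocates via New.
	r := readerPool.Get().(*pooledReader)
	r.reset(`{"x": 1}`)
	fmt.Println(r.src)

	// Drop references before pooling so pooled values don't pin memory.
	r.reset("")
	readerPool.Put(r)
}
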
-type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. -func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. -type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func (ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... 
- } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). - if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) 
ReadDecimal128() (primitive.Decimal128, error) { - if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { - return primitive.Decimal128{}, err - } - - v, err := ejvr.p.readValue(bsontype.Decimal128) - if err != nil { - return primitive.Decimal128{}, err - } - - d, err := v.parseDecimal128() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { - if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Double) - if err != nil { - return 0, err - } - - d, err := v.parseDouble() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { - if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int32) - if err != nil { - return 0, err - } - - i, err := v.parseInt32() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int64) - if err != nil { - return 0, err - } - - i, err := v.parseInt64() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { - if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.JavaScript) - if err != nil { - return "", err - } - - code, err = v.parseJavascript() - - ejvr.pop() - return code, err -} - -func (ejvr *extJSONValueReader) ReadMaxKey() error { - if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MaxKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("max") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadMinKey() error { - if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MinKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("min") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadNull() error { - if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Null) - if err != nil { - return err - } - - if v.t != bsontype.Null { - return fmt.Errorf("expected type null but got type %s", v.t) - } - - ejvr.pop() - return nil -} - -func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { - if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { - return primitive.ObjectID{}, err - } - - v, err := ejvr.p.readValue(bsontype.ObjectID) - if err != nil { - return primitive.ObjectID{}, err - } - - oid, err := v.parseObjectID() - - ejvr.pop() - return oid, err -} - -func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { - if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { - return "", "", err - } - - v, err := ejvr.p.readValue(bsontype.Regex) - if err != nil { - return "", "", err - } - - pattern, options, err = v.parseRegex() - - ejvr.pop() - return pattern, options, err -} - -func (ejvr *extJSONValueReader) ReadString() (string, error) { - if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { - 
return "", err - } - - v, err := ejvr.p.readValue(bsontype.String) - if err != nil { - return "", err - } - - if v.t != bsontype.String { - return "", fmt.Errorf("expected type string but got type %s", v.t) - } - - ejvr.pop() - return v.v.(string), nil -} - -func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { - if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.Symbol) - if err != nil { - return "", err - } - - symbol, err = v.parseSymbol() - - ejvr.pop() - return symbol, err -} - -func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { - if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { - return 0, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Timestamp) - if err != nil { - return 0, 0, err - } - - t, i, err = v.parseTimestamp() - - ejvr.pop() - return t, i, err -} - -func (ejvr *extJSONValueReader) ReadUndefined() error { - if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Undefined) - if err != nil { - return err - } - - err = v.parseUndefined() - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel, mDocument, mCodeWithScope: - default: - return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) - } - - name, t, err := ejvr.p.readKey() - - if err != nil { - if err == ErrEOD { - if ejvr.stack[ejvr.frame].mode == mCodeWithScope { - _, err := ejvr.p.peekType() - if err != nil { - return "", nil, err - } - } - - ejvr.pop() - } - - return "", nil, err - } - - ejvr.push(mElement, t) - return name, ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mArray: - default: - return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) - } - - t, err := ejvr.p.peekType() - if err != nil { - if err == ErrEOA { - ejvr.pop() - } - - return nil, err - } - - ejvr.push(mValue, t) - return ejvr, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go deleted file mode 100644 index ba39c96..0000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. - -package bsonrw - -import "unicode/utf8" - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
-var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML