diff --git a/build/ci.go b/build/ci.go index a5233209a2..11d425cc9f 100644 --- a/build/ci.go +++ b/build/ci.go @@ -207,11 +207,13 @@ func main() { func doInstall(cmdline []string) { var ( - dlgo = flag.Bool("dlgo", false, "Download Go and build with it") - arch = flag.String("arch", "", "Architecture to cross build for") - cc = flag.String("cc", "", "C compiler to cross build with") + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Architecture to cross build for") + cc = flag.String("cc", "", "C compiler to cross build with") + staticlink = flag.Bool("static", false, "Create statically-linked executable") ) flag.CommandLine.Parse(cmdline) + env := build.Env() // Configure the toolchain. tc := build.GoToolchain{GOARCH: *arch, CC: *cc} @@ -219,10 +221,11 @@ func doInstall(cmdline []string) { csdb := build.MustLoadChecksums("build/checksums.txt") tc.Root = build.DownloadGo(csdb, dlgoVersion) } + // Disable CLI markdown doc generation in release builds. + buildTags := []string{"urfave_cli_no_docs"} // Configure the build. - env := build.Env() - gobuild := tc.Go("build", buildFlags(env)...) + gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...) // arm64 CI builders are memory-constrained and can't handle concurrent builds, // better disable it. This check isn't the best, it should probably @@ -230,7 +233,6 @@ func doInstall(cmdline []string) { if env.CI && runtime.GOARCH == "arm64" { gobuild.Args = append(gobuild.Args, "-p", "1") } - // We use -trimpath to avoid leaking local paths into the built executables. gobuild.Args = append(gobuild.Args, "-trimpath") @@ -255,7 +257,7 @@ func doInstall(cmdline []string) { } // buildFlags returns the go tool flags for building. -func buildFlags(env build.Environment) (flags []string) { +func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) { var ld []string if env.Commit != "" { ld = append(ld, "-X", "main.gitCommit="+env.Commit) @@ -269,11 +271,21 @@ func buildFlags(env build.Environment) (flags []string) { // Enforce the stacksize to 8M, which is the case on most platforms apart from // alpine Linux. if runtime.GOOS == "linux" { - ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000") + extld := []string{"-Wl,-z,stack-size=0x800000"} + if staticLinking { + extld = append(extld, "-static") + // Under static linking, use of certain glibc features must be + // disabled to avoid shared library dependencies.
+ buildTags = append(buildTags, "osusergo", "netgo") + } + ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'") } if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } + if len(buildTags) > 0 { + flags = append(flags, "-tags", strings.Join(buildTags, ",")) + } return flags } diff --git a/ctxc/downloader/downloader.go b/ctxc/downloader/downloader.go index 6e975c9222..8a0e18f6ce 100644 --- a/ctxc/downloader/downloader.go +++ b/ctxc/downloader/downloader.go @@ -487,6 +487,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I if err := d.blockchain.SetHead(origin); err != nil { return err } + log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin) } } // Initiate the sync using a concurrent header and content retrieval algorithm diff --git a/go.mod b/go.mod index 371728ba06..749a974ce2 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,18 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 github.com/CortexFoundation/inference v1.0.2-0.20230307032835-9197d586a4e8 github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66 - github.com/CortexFoundation/torrentfs v1.0.59-0.20240129113837-7f5c3c53b617 + github.com/CortexFoundation/torrentfs v1.0.59-0.20240202155446-7354cfa88cb3 github.com/VictoriaMetrics/fastcache v1.12.2 github.com/arsham/figurine v1.3.0 github.com/aws/aws-sdk-go-v2 v1.24.1 github.com/aws/aws-sdk-go-v2/config v1.26.6 github.com/aws/aws-sdk-go-v2/credentials v1.16.16 - github.com/aws/aws-sdk-go-v2/service/route53 v1.37.0 + github.com/aws/aws-sdk-go-v2/service/route53 v1.37.1 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/cp v1.1.1 github.com/charmbracelet/bubbletea v0.25.0 - github.com/cloudflare/cloudflare-go v0.86.0 - github.com/cockroachdb/pebble v0.0.0-20240123194302-5b280af78f31 + github.com/cloudflare/cloudflare-go v0.87.0 + github.com/cockroachdb/pebble v0.0.0-20240202151741-904a6c99689c github.com/consensys/gnark-crypto v0.12.1 github.com/crate-crypto/go-kzg-4844 v0.7.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc @@ -83,7 +83,7 @@ require ( github.com/CortexFoundation/robot v1.0.7-0.20240101144533-02756bceae73 // indirect github.com/CortexFoundation/wormhole v0.0.2-0.20231221155549-5c938553b5fc // indirect github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 // indirect - github.com/RoaringBitmap/roaring v1.8.0 // indirect + github.com/RoaringBitmap/roaring v1.9.0 // indirect github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.4.0 // indirect @@ -102,8 +102,8 @@ require ( github.com/anacrolix/torrent v1.53.3 // indirect github.com/anacrolix/upnp v0.1.3 // indirect github.com/anacrolix/utp v0.2.0 // indirect - github.com/antlabs/stl v0.0.1 // indirect - github.com/antlabs/timer v0.0.12 // indirect + github.com/antlabs/stl v0.0.2 // indirect + github.com/antlabs/timer v0.1.1 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/arsham/rainbow v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect @@ -158,13 +158,13 @@ require ( github.com/google/btree v1.1.2 // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/pprof v0.0.0-20240125082051-42cd04596328 // indirect + github.com/google/pprof v0.0.0-20240130152714-0ed6a68c8d9e // indirect 
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect - github.com/jedib0t/go-pretty/v6 v6.5.3 // indirect + github.com/jedib0t/go-pretty/v6 v6.5.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.17.5 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -177,6 +177,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/nutsdb/nutsdb v1.0.4-0.20240108030426-a4ca1b95b3d6 // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/oapi-codegen/runtime v1.1.1 // indirect @@ -217,7 +218,7 @@ require ( github.com/tklauser/numcpus v0.7.0 // indirect github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd // indirect github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb // indirect - github.com/ucwong/golang-kv v1.0.24-0.20240123222739-ef750e9bde39 // indirect + github.com/ucwong/golang-kv v1.0.24-0.20240202153641-e1b6e9221f93 // indirect github.com/ucwong/shard v1.0.1-0.20231225143310-3022d14f9c3e // indirect github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect github.com/xujiajun/mmap-go v1.0.1 // indirect @@ -233,7 +234,7 @@ require ( golang.org/x/net v0.20.0 // indirect golang.org/x/term v0.16.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.40.8 // indirect + modernc.org/libc v1.40.13 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.7.2 // indirect modernc.org/sqlite v1.28.0 // indirect diff --git a/go.sum b/go.sum index b4bdd74d25..f498b2f53c 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,8 @@ github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66/go.mod h1: github.com/CortexFoundation/torrentfs v1.0.13-0.20200623060705-ce027f43f2f8/go.mod h1:Ma+tGhPPvz4CEZHaqEJQMOEGOfHeQBiAoNd1zyc/w3Q= github.com/CortexFoundation/torrentfs v1.0.14-0.20200703071639-3fcabcabf274/go.mod h1:qnb3YlIJmuetVBtC6Lsejr0Xru+1DNmDCdTqnwy7lhk= github.com/CortexFoundation/torrentfs v1.0.20-0.20200810031954-d36d26f82fcc/go.mod h1:N5BsicP5ynjXIi/Npl/SRzlJ630n1PJV2sRj0Z0t2HA= -github.com/CortexFoundation/torrentfs v1.0.59-0.20240129113837-7f5c3c53b617 h1:ulwe8PP2Mv3krzOWna0kDJLeV87yYCfZHvB5kZfogXk= -github.com/CortexFoundation/torrentfs v1.0.59-0.20240129113837-7f5c3c53b617/go.mod h1:ViWum/jqxa+88uU5QKt7IvHDY8uRICP5c1AEGB+VG4s= +github.com/CortexFoundation/torrentfs v1.0.59-0.20240202155446-7354cfa88cb3 h1:iJ61w3QOdS50+Q3KakZdPL2oi2ohb+kXzeahALFaO9w= +github.com/CortexFoundation/torrentfs v1.0.59-0.20240202155446-7354cfa88cb3/go.mod h1:yeOaC3RfN/W06OcChUpwKBOhxMc/oBcDUYpqzw6wXFw= github.com/CortexFoundation/wormhole v0.0.2-0.20231221155549-5c938553b5fc h1:LkbBJvvbfraAg/joFxPZtDvmdrjuTqPfbGhdnrEq7GM= github.com/CortexFoundation/wormhole v0.0.2-0.20231221155549-5c938553b5fc/go.mod h1:ipzmPabDgzYKUbXkGVe2gTkBEp+MsDx6pXGiuYzmP6s= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -84,8 +84,8 @@ github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrX github.com/RoaringBitmap/roaring v0.4.18/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= 
github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.8.0 h1:h3Tbzc/4K7sW3sRMlBdOIW77x9rikkqvOgU/j+ofkn0= -github.com/RoaringBitmap/roaring v1.8.0/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= +github.com/RoaringBitmap/roaring v1.9.0 h1:lwKhr90/j0jVXJyh5X+vQN1VVn77rQFfYnh6RDRGCcE= +github.com/RoaringBitmap/roaring v1.9.0/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -224,10 +224,10 @@ github.com/anacrolix/utp v0.2.0 h1:65Cdmr6q9WSw2KsM+rtJFu7rqDzLl2bdysf4KlNPcFI= github.com/anacrolix/utp v0.2.0/go.mod h1:HGk4GYQw1O/3T1+yhqT/F6EcBd+AAwlo9dYErNy7mj8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antlabs/stl v0.0.1 h1:TRD3csCrjREeLhLoQ/supaoCvFhNLBTNIwuRGrDIs6Q= -github.com/antlabs/stl v0.0.1/go.mod h1:wvVwP1loadLG3cRjxUxK8RL4Co5xujGaZlhbztmUEqQ= -github.com/antlabs/timer v0.0.12 h1:ZSLJAy9K28aGEQuFv2thmddvnT+uWnBo2RRsc5mfqFY= -github.com/antlabs/timer v0.0.12/go.mod h1:JNV8J3yGvMKhCavGXgj9HXrVZkfdQyKCcqXBT8RdyuU= +github.com/antlabs/stl v0.0.2 h1:sna1AXR5yIkNE9lWhCcKbheFJSVfCa3vugnGyakI79s= +github.com/antlabs/stl v0.0.2/go.mod h1:kKrO4xrn9cfS1mJVo+/BqePZjAYMXqD0amGF2Ouq7ac= +github.com/antlabs/timer v0.1.1 h1:YVR7kfH84awt7uJyAdc9IILdA/BQaSKeEUOa2kKuEo0= +github.com/antlabs/timer v0.1.1/go.mod h1:mpw4zlD5KVjstEyUDp43DGLWsY076Mdo4bS78NTseRE= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -273,8 +273,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/route53 v1.37.0 h1:f3hBZWtpn9clZGXJoqahQeec9ZPZnu22g8pg+zNyif0= -github.com/aws/aws-sdk-go-v2/service/route53 v1.37.0/go.mod h1:8qqfpG4mug2JLlEyWPSFhEGvJiaZ9iPmMDDMYc5Xtas= +github.com/aws/aws-sdk-go-v2/service/route53 v1.37.1 h1:U7OksynDSIFScG+7sGqOuJh+fP1USMkNtjxzGFZYG34= +github.com/aws/aws-sdk-go-v2/service/route53 v1.37.1/go.mod h1:8qqfpG4mug2JLlEyWPSFhEGvJiaZ9iPmMDDMYc5Xtas= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= @@ -345,8 +345,8 @@ github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMn github.com/clbanning/x2j 
v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.11.7/go.mod h1:GyEn0B58Zvn/XOrkE/R31DrKqjTsBQ9E5ICzRlE09hk= -github.com/cloudflare/cloudflare-go v0.86.0 h1:jEKN5VHNYNYtfDL2lUFLTRo+nOVNPFxpXTstVx0rqHI= -github.com/cloudflare/cloudflare-go v0.86.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= +github.com/cloudflare/cloudflare-go v0.87.0 h1:hLuXnDneECNpen4YwfA4+kcjyv8gsj30kOJsHPyw9pI= +github.com/cloudflare/cloudflare-go v0.87.0/go.mod h1:wYW/5UP02TUfBToa/yKbQHV+r6h1NnJ1Je7XjuGM4Jw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= @@ -357,8 +357,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= -github.com/cockroachdb/pebble v0.0.0-20240123194302-5b280af78f31 h1:wtLGmUF5ww2DdU+r94i8XaZ6Hyf5KT3Fcvpfg0QxCJY= -github.com/cockroachdb/pebble v0.0.0-20240123194302-5b280af78f31/go.mod h1:BHuaMa/lK7fUe75BlsteiiTu8ptIG+qSAuDtGMArP18= +github.com/cockroachdb/pebble v0.0.0-20240202151741-904a6c99689c h1:pR4Dm7LmNAAk0s4LCx4PaYdN4rTwq+ep4Zv+Ico65HU= +github.com/cockroachdb/pebble v0.0.0-20240202151741-904a6c99689c/go.mod h1:BHuaMa/lK7fUe75BlsteiiTu8ptIG+qSAuDtGMArP18= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -621,8 +621,8 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= -github.com/google/pprof v0.0.0-20240125082051-42cd04596328 h1:oI+lCI2DY1BsRrdzMJBhIMxBBdlZJl31YNQC11EiyvA= -github.com/google/pprof v0.0.0-20240125082051-42cd04596328/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240130152714-0ed6a68c8d9e h1:E+3PBMCXn0ma79O7iCrne0iUpKtZ7rIcZvoz+jNtNtw= +github.com/google/pprof v0.0.0-20240130152714-0ed6a68c8d9e/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -737,8 +737,8 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1: github.com/jackpal/go-nat-pmp v1.0.2 
h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jedib0t/go-pretty/v6 v6.5.3 h1:GIXn6Er/anHTkVUoufs7ptEvxdD6KIhR7Axa2wYCPF0= -github.com/jedib0t/go-pretty/v6 v6.5.3/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg= +github.com/jedib0t/go-pretty/v6 v6.5.4 h1:gOGo0613MoqUcf0xCj+h/V3sHDaZasfv152G6/5l91s= +github.com/jedib0t/go-pretty/v6 v6.5.4/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -892,6 +892,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nutsdb/nutsdb v1.0.4-0.20240108030426-a4ca1b95b3d6 h1:3UFAJdFnbWAI2ThsGNUxzbEfL2pfyyFqRU/DmuKFjd8= @@ -1201,7 +1203,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -1242,8 +1243,8 @@ github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd h1:gBtlvLAsgLk+ github.com/ucwong/filecache v1.0.6-0.20230405163841-810d53ced4bd/go.mod h1:ddwX+NCjMZPdpzcGh1fcEbNTUTCtKgt2hC2rqvmLKgA= github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb h1:dVZH3AH9f7zB3VBmsjn25B7lfcAyMP4QxdFYTrfj7tg= github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb/go.mod h1:3yswsBsVuwsOjDvFfC5Na9XSEf4HC7mj3W3g6jvSY/s= -github.com/ucwong/golang-kv v1.0.24-0.20240123222739-ef750e9bde39 h1:KPCSgOAv9BkbPFstvGthukhSPBoG01JDK/Vmk6zaETA= -github.com/ucwong/golang-kv v1.0.24-0.20240123222739-ef750e9bde39/go.mod h1:kjcnK5dlWnps58dYfjtz39jta0LuLTlmch/m6Lk6wQg= +github.com/ucwong/golang-kv v1.0.24-0.20240202153641-e1b6e9221f93 h1:rzCRX7oPCmLDeR7EYDDj+9KruMpq/QgVVJBZjLLF01g= +github.com/ucwong/golang-kv v1.0.24-0.20240202153641-e1b6e9221f93/go.mod h1:EmULQA/O10n3nfk6RM4svxrFiTlHVWvezx4XDS6lZic= github.com/ucwong/golang-set 
v1.8.1-0.20200419153428-d7b0b1ac2d43/go.mod h1:xu0FaiQFGbBcFZj2o7udZ5rbA8jRTsv47hkPoG5qQNM= github.com/ucwong/goleveldb v1.0.3-0.20200508074755-578cba616f37/go.mod h1:dgJUTtDxq/ne6/JzZhHzF24OL/uqILz9IWk8HmT4V2g= github.com/ucwong/goleveldb v1.0.3-0.20200618184106-f1c6bc3a428b/go.mod h1:7Sq6w7AfEZuB/a6mrlvHCSXCSkqojCMMrM3Ei12QAT0= @@ -1744,8 +1745,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= @@ -1784,8 +1783,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -modernc.org/libc v1.40.8 h1:ZHN83BZzEytp4ctJMC2lxGTU3l8jo+2kGW7AUatIDZw= -modernc.org/libc v1.40.8/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/libc v1.40.13 h1:oGU9RTac2PYvPu9ZOrLCu3HQGG8mqdx1oWT83oAaI0E= +modernc.org/libc v1.40.13/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md index 7ac49b1023..67a417c55f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.37.1 (2024-01-30) + +* **Documentation**: Update the SDKs for text changes in the APIs. 
+ # v1.37.0 (2024-01-10) * **Feature**: Route53 now supports geoproximity routing in AWS regions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go index 28e26a96e8..5795744e23 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/go_module_metadata.go @@ -3,4 +3,4 @@ package route53 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.37.0" +const goModuleVersion = "1.37.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go index 3696e8b478..bb5c97e00f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/route53/types/types.go @@ -632,9 +632,8 @@ type GeoLocationDetails struct { noSmithyDocumentSerde } -// (Resource record sets only): A complex type that lets you control how Amazon -// Route 53 responds to DNS queries based on the geographic origin of the query and -// your resources. Only one of , LocalZoneGroup , Coordinates , or Amazon Web +// (Resource record sets only): A complex type that lets you specify where your +// resources are located. Only one of LocalZoneGroup , Coordinates , or Amazon Web // ServicesRegion is allowed per request at a time. For more information about // geoproximity routing, see Geoproximity routing (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy-geoproximity.html) // in the Amazon Route 53 Developer Guide. diff --git a/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md b/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md index 045234f388..2a2f071b43 100644 --- a/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md +++ b/vendor/github.com/cloudflare/cloudflare-go/CHANGELOG.md @@ -1,4 +1,25 @@ -## 0.87.0 (Unreleased) +## 0.88.0 (Unreleased) + +## 0.87.0 (January 31st, 2024) + +ENHANCEMENTS: + +* access_seats: Add `UpdateAccessUsersSeats` with an array as input for multiple operations ([#1480](https://github.com/cloudflare/cloudflare-go/issues/1480)) +* dlp: add support for EDM and CWL datasets ([#1485](https://github.com/cloudflare/cloudflare-go/issues/1485)) +* logpush: Add support for Output Options ([#1468](https://github.com/cloudflare/cloudflare-go/issues/1468)) +* pages_project: Add `build_caching` attribute ([#1489](https://github.com/cloudflare/cloudflare-go/issues/1489)) +* streams: adds support for stream create parameters for tus upload initiate ([#1386](https://github.com/cloudflare/cloudflare-go/issues/1386)) +* teams_accounts: add support for extended email matching ([#1486](https://github.com/cloudflare/cloudflare-go/issues/1486)) + +BUG FIXES: + +* access_seats: UpdateAccessUserSeat: fix parameters not being an array when sending to the api. 
This caused an error when updating a user's seat ([#1480](https://github.com/cloudflare/cloudflare-go/issues/1480))
+* access_users: ListAccessUsers was returning wrong values in pointer fields due to a variable misused in the loop ([#1482](https://github.com/cloudflare/cloudflare-go/issues/1482))
+* flarectl: alias zone certs to "ct" instead of duplicating the "c" alias ([#1484](https://github.com/cloudflare/cloudflare-go/issues/1484))
+
+DEPENDENCIES:
+
+* deps: bumps actions/cache from 3 to 4 ([#1483](https://github.com/cloudflare/cloudflare-go/issues/1483))
 
 ## 0.86.0 (January 17, 2024)
diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_seats.go b/vendor/github.com/cloudflare/cloudflare-go/access_seats.go
index c1d6bf2130..ea44eebc7b 100644
--- a/vendor/github.com/cloudflare/cloudflare-go/access_seats.go
+++ b/vendor/github.com/cloudflare/cloudflare-go/access_seats.go
@@ -28,6 +28,13 @@ type UpdateAccessUserSeatParams struct {
 	GatewaySeat *bool `json:"gateway_seat"`
 }
 
+// UpdateAccessUsersSeatsParams represents the update payload for multiple access seats.
+type UpdateAccessUsersSeatsParams []struct {
+	SeatUID     string `json:"seat_uid,omitempty"`
+	AccessSeat  *bool  `json:"access_seat"`
+	GatewaySeat *bool  `json:"gateway_seat"`
+}
+
 // AccessUserSeatResponse represents the response from the access user seat endpoints.
 type UpdateAccessUserSeatResponse struct {
 	Response
@@ -35,7 +42,7 @@ type UpdateAccessUserSeatResponse struct {
 	ResultInfo `json:"result_info"`
 }
 
-// UpdateAccessUserSeat updates a Access User Seat.
+// UpdateAccessUserSeat updates a single Access User Seat.
 //
 // API documentation: https://developers.cloudflare.com/api/operations/zero-trust-seats-update-a-user-seat
 func (api *API) UpdateAccessUserSeat(ctx context.Context, rc *ResourceContainer, params UpdateAccessUserSeatParams) ([]AccessUpdateAccessUserSeatResult, error) {
@@ -53,6 +60,42 @@ func (api *API) UpdateAccessUserSeat(ctx context.Context, rc *ResourceContainer,
 		rc.Identifier,
 	)
 
+	// this request expects an array of params, but this method only accepts a single param
+	res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, []UpdateAccessUserSeatParams{params})
+	if err != nil {
+		return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errMakeRequestError, err)
+	}
+
+	var updateAccessUserSeatResponse UpdateAccessUserSeatResponse
+	err = json.Unmarshal(res, &updateAccessUserSeatResponse)
+	if err != nil {
+		return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return updateAccessUserSeatResponse.Result, nil
+}
+
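Example (editor's aside, not part of the patch): a minimal sketch of the single- and multi-seat helpers added above, assuming a configured cloudflare-go client; the account ID and seat UIDs are placeholders.

```go
package main

import (
	"context"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	// Placeholder credentials and IDs, not values from this patch.
	api, err := cloudflare.New("api-key", "user@example.com")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	rc := cloudflare.AccountIdentifier("0123456789abcdef")

	// Single seat: the method now wraps the payload in a one-element array
	// before issuing the PATCH, per the fix above.
	if _, err := api.UpdateAccessUserSeat(ctx, rc, cloudflare.UpdateAccessUserSeatParams{
		SeatUID:     "seat-uid-1",
		GatewaySeat: cloudflare.BoolPtr(false),
	}); err != nil {
		log.Fatal(err)
	}

	// Many seats in one request via the new plural helper.
	if _, err := api.UpdateAccessUsersSeats(ctx, rc, cloudflare.UpdateAccessUsersSeatsParams{
		{SeatUID: "seat-uid-1", AccessSeat: cloudflare.BoolPtr(false), GatewaySeat: cloudflare.BoolPtr(false)},
		{SeatUID: "seat-uid-2", AccessSeat: cloudflare.BoolPtr(true), GatewaySeat: cloudflare.BoolPtr(true)},
	}); err != nil {
		log.Fatal(err)
	}
}
```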
+// UpdateAccessUsersSeats updates many Access User Seats.
+//
+// API documentation: https://developers.cloudflare.com/api/operations/zero-trust-seats-update-a-user-seat
+func (api *API) UpdateAccessUsersSeats(ctx context.Context, rc *ResourceContainer, params UpdateAccessUsersSeatsParams) ([]AccessUpdateAccessUserSeatResult, error) {
+	if rc.Level != AccountRouteLevel {
+		return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf(errInvalidResourceContainerAccess, rc.Level)
+	}
+
+	for _, param := range params {
+		if param.SeatUID == "" {
+			return []AccessUpdateAccessUserSeatResult{}, errMissingAccessSeatUID
+		}
+	}
+
+	uri := fmt.Sprintf(
+		"/%s/%s/access/seats",
+		rc.Level,
+		rc.Identifier,
+	)
+
+	// the API expects an array of params, which this method already receives
 	res, err := api.makeRequestContext(ctx, http.MethodPatch, uri, params)
 	if err != nil {
 		return []AccessUpdateAccessUserSeatResult{}, fmt.Errorf("%s: %w", errMakeRequestError, err)
diff --git a/vendor/github.com/cloudflare/cloudflare-go/access_users.go b/vendor/github.com/cloudflare/cloudflare-go/access_users.go
index 690bc446e0..233ceb2bb6 100644
--- a/vendor/github.com/cloudflare/cloudflare-go/access_users.go
+++ b/vendor/github.com/cloudflare/cloudflare-go/access_users.go
@@ -223,7 +223,7 @@ func (api *API) ListAccessUsers(ctx context.Context, rc *ResourceContainer, para
 	}
 
 	var accessUsers []AccessUser
-	var r AccessUserListResponse
+	var resultInfo *ResultInfo = nil
 
 	for {
 		uri := buildURI(baseURL, params)
@@ -231,6 +231,8 @@ func (api *API) ListAccessUsers(ctx context.Context, rc *ResourceContainer, para
 		if err != nil {
 			return []AccessUser{}, &ResultInfo{}, fmt.Errorf("%s: %w", errMakeRequestError, err)
 		}
+		var r AccessUserListResponse
+		resultInfo = &r.ResultInfo
 
 		err = json.Unmarshal(res, &r)
 		if err != nil {
@@ -243,7 +245,7 @@ func (api *API) ListAccessUsers(ctx context.Context, rc *ResourceContainer, para
 		}
 	}
 
-	return accessUsers, &r.ResultInfo, nil
+	return accessUsers, resultInfo, nil
 }
 
 // GetAccessUserActiveSessions returns a list of active sessions for an user.
diff --git a/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go b/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go
new file mode 100644
index 0000000000..09cf86416c
--- /dev/null
+++ b/vendor/github.com/cloudflare/cloudflare-go/dlp_dataset.go
@@ -0,0 +1,278 @@
+package cloudflare
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/goccy/go-json"
+)
+
+var (
+	ErrMissingDatasetID = errors.New("missing required dataset ID")
+)
+
+// DLPDatasetUpload represents a single upload version attached to a DLP dataset.
+type DLPDatasetUpload struct {
+	NumCells int    `json:"num_cells"`
+	Status   string `json:"status,omitempty"`
+	Version  int    `json:"version"`
+}
+
+// DLPDataset represents a DLP Exact Data Match dataset or Custom Word List.
+type DLPDataset struct {
+	CreatedAt   *time.Time         `json:"created_at,omitempty"`
+	Description string             `json:"description,omitempty"`
+	ID          string             `json:"id,omitempty"`
+	Name        string             `json:"name,omitempty"`
+	NumCells    int                `json:"num_cells"`
+	Secret      *bool              `json:"secret,omitempty"`
+	Status      string             `json:"status,omitempty"`
+	UpdatedAt   *time.Time         `json:"updated_at,omitempty"`
+	Uploads     []DLPDatasetUpload `json:"uploads"`
+}
+
+type ListDLPDatasetsParams struct{}
+
+type DLPDatasetListResponse struct {
+	Result []DLPDataset `json:"result"`
+	Response
+}
+
+// ListDLPDatasets returns all the DLP datasets associated with an account.
+// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-read-all +func (api *API) ListDLPDatasets(ctx context.Context, rc *ResourceContainer, params ListDLPDatasetsParams) ([]DLPDataset, error) { + if rc.Identifier == "" { + return nil, nil + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + var dlpDatasetListResponse DLPDatasetListResponse + err = json.Unmarshal(res, &dlpDatasetListResponse) + if err != nil { + return nil, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetListResponse.Result, nil +} + +type DLPDatasetGetResponse struct { + Result DLPDataset `json:"result"` + Response +} + +// GetDLPDataset returns a DLP dataset based on the dataset ID. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-read +func (api *API) GetDLPDataset(ctx context.Context, rc *ResourceContainer, datasetID string) (DLPDataset, error) { + if rc.Identifier == "" { + return DLPDataset{}, nil + } + + if datasetID == "" { + return DLPDataset{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, datasetID), nil) + + res, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return DLPDataset{}, err + } + + var dlpDatasetGetResponse DLPDatasetGetResponse + err = json.Unmarshal(res, &dlpDatasetGetResponse) + if err != nil { + return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetGetResponse.Result, nil +} + +type CreateDLPDatasetParams struct { + Description string `json:"description,omitempty"` + Name string `json:"name"` + Secret *bool `json:"secret,omitempty"` +} + +type CreateDLPDatasetResult struct { + MaxCells int `json:"max_cells"` + Secret string `json:"secret"` + Version int `json:"version"` + Dataset DLPDataset `json:"dataset"` +} + +type CreateDLPDatasetResponse struct { + Result CreateDLPDatasetResult `json:"result"` + Response +} + +// CreateDLPDataset creates a DLP dataset. +// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-create +func (api *API) CreateDLPDataset(ctx context.Context, rc *ResourceContainer, params CreateDLPDatasetParams) (CreateDLPDatasetResult, error) { + if rc.Identifier == "" { + return CreateDLPDatasetResult{}, nil + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets", rc.Level, rc.Identifier), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params) + if err != nil { + return CreateDLPDatasetResult{}, err + } + + var CreateDLPDatasetResponse CreateDLPDatasetResponse + err = json.Unmarshal(res, &CreateDLPDatasetResponse) + if err != nil { + return CreateDLPDatasetResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return CreateDLPDatasetResponse.Result, nil +} + +// DeleteDLPDataset deletes a DLP dataset. 
+//
+// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-delete
+func (api *API) DeleteDLPDataset(ctx context.Context, rc *ResourceContainer, datasetID string) error {
+	if rc.Identifier == "" {
+		return ErrMissingResourceIdentifier
+	}
+
+	if datasetID == "" {
+		return ErrMissingDatasetID
+	}
+
+	uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, datasetID), nil)
+	_, err := api.makeRequestContext(ctx, http.MethodDelete, uri, nil)
+	return err
+}
+
+type UpdateDLPDatasetParams struct {
+	DatasetID   string
+	Description *string `json:"description,omitempty"` // nil to leave description as-is
+	Name        *string `json:"name,omitempty"`        // nil to leave name as-is
+}
+
+type UpdateDLPDatasetResponse struct {
+	Result DLPDataset `json:"result"`
+	Response
+}
+
+// UpdateDLPDataset updates the details of a DLP dataset.
+//
+// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-update
+func (api *API) UpdateDLPDataset(ctx context.Context, rc *ResourceContainer, params UpdateDLPDatasetParams) (DLPDataset, error) {
+	if rc.Identifier == "" {
+		return DLPDataset{}, nil
+	}
+
+	if params.DatasetID == "" {
+		return DLPDataset{}, ErrMissingDatasetID
+	}
+
+	uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s", rc.Level, rc.Identifier, params.DatasetID), nil)
+
+	res, err := api.makeRequestContext(ctx, http.MethodPut, uri, params)
+	if err != nil {
+		return DLPDataset{}, err
+	}
+
+	var updateDLPDatasetResponse UpdateDLPDatasetResponse
+	err = json.Unmarshal(res, &updateDLPDatasetResponse)
+	if err != nil {
+		return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return updateDLPDatasetResponse.Result, nil
+}
+
+type CreateDLPDatasetUploadResult struct {
+	MaxCells int    `json:"max_cells"`
+	Secret   string `json:"secret"`
+	Version  int    `json:"version"`
+}
+
+type CreateDLPDatasetUploadResponse struct {
+	Result CreateDLPDatasetUploadResult `json:"result"`
+	Response
+}
+type CreateDLPDatasetUploadParams struct {
+	DatasetID string
+}
+
+// CreateDLPDatasetUpload creates a new upload version for the specified DLP dataset.
+//
+// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-create-version
+func (api *API) CreateDLPDatasetUpload(ctx context.Context, rc *ResourceContainer, params CreateDLPDatasetUploadParams) (CreateDLPDatasetUploadResult, error) {
+	if rc.Identifier == "" {
+		return CreateDLPDatasetUploadResult{}, nil
+	}
+
+	if params.DatasetID == "" {
+		return CreateDLPDatasetUploadResult{}, ErrMissingDatasetID
+	}
+
+	uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s/upload", rc.Level, rc.Identifier, params.DatasetID), nil)
+
+	res, err := api.makeRequestContext(ctx, http.MethodPost, uri, nil)
+	if err != nil {
+		return CreateDLPDatasetUploadResult{}, err
+	}
+
+	var dlpDatasetCreateUploadResponse CreateDLPDatasetUploadResponse
+	err = json.Unmarshal(res, &dlpDatasetCreateUploadResponse)
+	if err != nil {
+		return CreateDLPDatasetUploadResult{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
+	}
+
+	return dlpDatasetCreateUploadResponse.Result, nil
+}
+
+type UploadDLPDatasetVersionParams struct {
+	DatasetID string
+	Version   int
+	Body      interface{}
+}
+
+type UploadDLPDatasetVersionResponse struct {
+	Result DLPDataset `json:"result"`
+	Response
+}
+
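Example (editor's aside, not part of the patch): the new EDM/CWL dataset surface is a three-step flow — create the dataset, reserve an upload version, then push the contents. A minimal sketch assuming a configured client; the account ID, dataset name, and cell data are placeholders.

```go
package main

import (
	"context"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	// Placeholder credentials and IDs, not values from this patch.
	api, err := cloudflare.New("api-key", "user@example.com")
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	rc := cloudflare.AccountIdentifier("0123456789abcdef")

	// 1. Create the dataset shell.
	created, err := api.CreateDLPDataset(ctx, rc, cloudflare.CreateDLPDatasetParams{
		Name: "blocked-words",
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Reserve a new upload version for it.
	upload, err := api.CreateDLPDatasetUpload(ctx, rc, cloudflare.CreateDLPDatasetUploadParams{
		DatasetID: created.Dataset.ID,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Push the actual contents against that version.
	if _, err := api.UploadDLPDatasetVersion(ctx, rc, cloudflare.UploadDLPDatasetVersionParams{
		DatasetID: created.Dataset.ID,
		Version:   upload.Version,
		Body:      "first entry\nsecond entry",
	}); err != nil {
		log.Fatal(err)
	}
}
```

+// UploadDLPDatasetVersion uploads a new version of the specified DLP dataset.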
+// +// API reference: https://developers.cloudflare.com/api/operations/dlp-datasets-upload-version +func (api *API) UploadDLPDatasetVersion(ctx context.Context, rc *ResourceContainer, params UploadDLPDatasetVersionParams) (DLPDataset, error) { + if rc.Identifier == "" { + return DLPDataset{}, nil + } + + if params.DatasetID == "" { + return DLPDataset{}, ErrMissingDatasetID + } + + uri := buildURI(fmt.Sprintf("/%s/%s/dlp/datasets/%s/upload/%d", rc.Level, rc.Identifier, params.DatasetID, params.Version), nil) + + res, err := api.makeRequestContext(ctx, http.MethodPost, uri, params.Body) + if err != nil { + return DLPDataset{}, err + } + + var dlpDatasetUploadVersionResponse UploadDLPDatasetVersionResponse + err = json.Unmarshal(res, &dlpDatasetUploadVersionResponse) + if err != nil { + return DLPDataset{}, fmt.Errorf("%s: %w", errUnmarshalError, err) + } + + return dlpDatasetUploadVersionResponse.Result, nil +} diff --git a/vendor/github.com/cloudflare/cloudflare-go/logpush.go b/vendor/github.com/cloudflare/cloudflare-go/logpush.go index 3fbb1af505..34e32e9cb5 100644 --- a/vendor/github.com/cloudflare/cloudflare-go/logpush.go +++ b/vendor/github.com/cloudflare/cloudflare-go/logpush.go @@ -12,22 +12,23 @@ import ( // LogpushJob describes a Logpush job. type LogpushJob struct { - ID int `json:"id,omitempty"` - Dataset string `json:"dataset"` - Enabled bool `json:"enabled"` - Kind string `json:"kind,omitempty"` - Name string `json:"name"` - LogpullOptions string `json:"logpull_options"` - DestinationConf string `json:"destination_conf"` - OwnershipChallenge string `json:"ownership_challenge,omitempty"` - LastComplete *time.Time `json:"last_complete,omitempty"` - LastError *time.Time `json:"last_error,omitempty"` - ErrorMessage string `json:"error_message,omitempty"` - Frequency string `json:"frequency,omitempty"` - Filter *LogpushJobFilters `json:"filter,omitempty"` - MaxUploadBytes int `json:"max_upload_bytes,omitempty"` - MaxUploadRecords int `json:"max_upload_records,omitempty"` - MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` + ID int `json:"id,omitempty"` + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + LastComplete *time.Time `json:"last_complete,omitempty"` + LastError *time.Time `json:"last_error,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` } type LogpushJobFilters struct { @@ -63,6 +64,21 @@ type LogpushJobFilter struct { Value interface{} `json:"value,omitempty"` } +type LogpushOutputOptions struct { + FieldNames []string `json:"field_names"` + OutputType string `json:"output_type,omitempty"` + BatchPrefix string `json:"batch_prefix,omitempty"` + BatchSuffix string `json:"batch_suffix,omitempty"` + RecordPrefix string `json:"record_prefix,omitempty"` + RecordSuffix string `json:"record_suffix,omitempty"` + RecordTemplate string `json:"record_template,omitempty"` + RecordDelimiter string 
`json:"record_delimiter,omitempty"` + FieldDelimiter string `json:"field_delimiter,omitempty"` + TimestampFormat string `json:"timestamp_format,omitempty"` + SampleRate float64 `json:"sample_rate,omitempty"` + CVE202144228 *bool `json:"CVE-2021-44228,omitempty"` +} + // LogpushJobsResponse is the API response, containing an array of Logpush Jobs. type LogpushJobsResponse struct { Response @@ -323,19 +339,20 @@ func (filter *LogpushJobFilter) Validate() error { } type CreateLogpushJobParams struct { - Dataset string `json:"dataset"` - Enabled bool `json:"enabled"` - Kind string `json:"kind,omitempty"` - Name string `json:"name"` - LogpullOptions string `json:"logpull_options"` - DestinationConf string `json:"destination_conf"` - OwnershipChallenge string `json:"ownership_challenge,omitempty"` - ErrorMessage string `json:"error_message,omitempty"` - Frequency string `json:"frequency,omitempty"` - Filter *LogpushJobFilters `json:"filter,omitempty"` - MaxUploadBytes int `json:"max_upload_bytes,omitempty"` - MaxUploadRecords int `json:"max_upload_records,omitempty"` - MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` } type ListLogpushJobsParams struct{} @@ -349,22 +366,23 @@ type GetLogpushFieldsParams struct { } type UpdateLogpushJobParams struct { - ID int `json:"-"` - Dataset string `json:"dataset"` - Enabled bool `json:"enabled"` - Kind string `json:"kind,omitempty"` - Name string `json:"name"` - LogpullOptions string `json:"logpull_options"` - DestinationConf string `json:"destination_conf"` - OwnershipChallenge string `json:"ownership_challenge,omitempty"` - LastComplete *time.Time `json:"last_complete,omitempty"` - LastError *time.Time `json:"last_error,omitempty"` - ErrorMessage string `json:"error_message,omitempty"` - Frequency string `json:"frequency,omitempty"` - Filter *LogpushJobFilters `json:"filter,omitempty"` - MaxUploadBytes int `json:"max_upload_bytes,omitempty"` - MaxUploadRecords int `json:"max_upload_records,omitempty"` - MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` + ID int `json:"-"` + Dataset string `json:"dataset"` + Enabled bool `json:"enabled"` + Kind string `json:"kind,omitempty"` + Name string `json:"name"` + LogpullOptions string `json:"logpull_options,omitempty"` + OutputOptions *LogpushOutputOptions `json:"output_options,omitempty"` + DestinationConf string `json:"destination_conf"` + OwnershipChallenge string `json:"ownership_challenge,omitempty"` + LastComplete *time.Time `json:"last_complete,omitempty"` + LastError *time.Time `json:"last_error,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Frequency string `json:"frequency,omitempty"` + Filter *LogpushJobFilters `json:"filter,omitempty"` + MaxUploadBytes int `json:"max_upload_bytes,omitempty"` + 
MaxUploadRecords int `json:"max_upload_records,omitempty"` + MaxUploadIntervalSeconds int `json:"max_upload_interval_seconds,omitempty"` } type ValidateLogpushOwnershipChallengeParams struct { diff --git a/vendor/github.com/cloudflare/cloudflare-go/pages_project.go b/vendor/github.com/cloudflare/cloudflare-go/pages_project.go index 0aa7eeee89..202846158a 100644 --- a/vendor/github.com/cloudflare/cloudflare-go/pages_project.go +++ b/vendor/github.com/cloudflare/cloudflare-go/pages_project.go @@ -53,6 +53,7 @@ type PagesProjectSourceConfig struct { // PagesProjectBuildConfig represents the configuration of a Pages project build process. type PagesProjectBuildConfig struct { + BuildCaching *bool `json:"build_caching,omitempty"` BuildCommand string `json:"build_command"` DestinationDir string `json:"destination_dir"` RootDir string `json:"root_dir"` diff --git a/vendor/github.com/cloudflare/cloudflare-go/stream.go b/vendor/github.com/cloudflare/cloudflare-go/stream.go index ebb175cb86..3880649962 100644 --- a/vendor/github.com/cloudflare/cloudflare-go/stream.go +++ b/vendor/github.com/cloudflare/cloudflare-go/stream.go @@ -209,10 +209,12 @@ type StreamInitiateTUSUploadResponse struct { type TUSUploadMetadata struct { Name string `json:"name,omitempty"` + MaxDurationSeconds int `json:"maxDurationSeconds,omitempty"` RequireSignedURLs bool `json:"requiresignedurls,omitempty"` AllowedOrigins string `json:"allowedorigins,omitempty"` ThumbnailTimestampPct float64 `json:"thumbnailtimestamppct,omitempty"` ScheduledDeletion *time.Time `json:"scheduledDeletion,omitempty"` + Expiry *time.Time `json:"expiry,omitempty"` Watermark string `json:"watermark,omitempty"` } @@ -221,6 +223,9 @@ func (t TUSUploadMetadata) ToTUSCsv() (string, error) { if t.Name != "" { metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "name", base64.StdEncoding.EncodeToString([]byte(t.Name)))) } + if t.MaxDurationSeconds != 0 { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "maxDurationSeconds", base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(t.MaxDurationSeconds))))) + } if t.RequireSignedURLs { metadataValues = append(metadataValues, "requiresignedurls") } @@ -233,6 +238,9 @@ func (t TUSUploadMetadata) ToTUSCsv() (string, error) { if t.ScheduledDeletion != nil { metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "scheduledDeletion", base64.StdEncoding.EncodeToString([]byte(t.ScheduledDeletion.Format(time.RFC3339))))) } + if t.Expiry != nil { + metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "expiry", base64.StdEncoding.EncodeToString([]byte(t.Expiry.Format(time.RFC3339))))) + } if t.Watermark != "" { metadataValues = append(metadataValues, fmt.Sprintf("%s %s", "watermark", base64.StdEncoding.EncodeToString([]byte(t.Watermark)))) } diff --git a/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go b/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go index 15665e04f5..90ae7d347f 100644 --- a/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go +++ b/vendor/github.com/cloudflare/cloudflare-go/teams_accounts.go @@ -37,14 +37,15 @@ type TeamsConfiguration struct { } type TeamsAccountSettings struct { - Antivirus *TeamsAntivirus `json:"antivirus,omitempty"` - TLSDecrypt *TeamsTLSDecrypt `json:"tls_decrypt,omitempty"` - ActivityLog *TeamsActivityLog `json:"activity_log,omitempty"` - BlockPage *TeamsBlockPage `json:"block_page,omitempty"` - BrowserIsolation *BrowserIsolation `json:"browser_isolation,omitempty"` - FIPS *TeamsFIPS `json:"fips,omitempty"` - 
ProtocolDetection *TeamsProtocolDetection `json:"protocol_detection,omitempty"` - BodyScanning *TeamsBodyScanning `json:"body_scanning,omitempty"` + Antivirus *TeamsAntivirus `json:"antivirus,omitempty"` + TLSDecrypt *TeamsTLSDecrypt `json:"tls_decrypt,omitempty"` + ActivityLog *TeamsActivityLog `json:"activity_log,omitempty"` + BlockPage *TeamsBlockPage `json:"block_page,omitempty"` + BrowserIsolation *BrowserIsolation `json:"browser_isolation,omitempty"` + FIPS *TeamsFIPS `json:"fips,omitempty"` + ProtocolDetection *TeamsProtocolDetection `json:"protocol_detection,omitempty"` + BodyScanning *TeamsBodyScanning `json:"body_scanning,omitempty"` + ExtendedEmailMatching *TeamsExtendedEmailMatching `json:"extended_email_matching,omitempty"` } type BrowserIsolation struct { @@ -97,6 +98,10 @@ type TeamsBodyScanning struct { InspectionMode TeamsInspectionMode `json:"inspection_mode,omitempty"` } +type TeamsExtendedEmailMatching struct { + Enabled *bool `json:"enabled,omitempty"` +} + type TeamsRuleType = string const ( diff --git a/vendor/github.com/cockroachdb/pebble/cleaner.go b/vendor/github.com/cockroachdb/pebble/cleaner.go index 1f9262e58a..e2fa215029 100644 --- a/vendor/github.com/cockroachdb/pebble/cleaner.go +++ b/vendor/github.com/cockroachdb/pebble/cleaner.go @@ -12,7 +12,6 @@ import ( "github.com/cockroachdb/errors/oserror" "github.com/cockroachdb/pebble/internal/base" - "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/objstorage" "github.com/cockroachdb/tokenbucket" ) @@ -123,10 +122,6 @@ func (cm *cleanupManager) EnqueueJob(jobID int, obsoleteFiles []obsoleteFile) { cm.maybeLogLocked() cm.mu.Unlock() - if invariants.Enabled && len(cm.jobsCh) >= cap(cm.jobsCh)-2 { - panic("cleanup jobs queue full") - } - cm.jobsCh <- job } diff --git a/vendor/github.com/cockroachdb/pebble/compaction.go b/vendor/github.com/cockroachdb/pebble/compaction.go index b74ca19ce9..f08d7dc151 100644 --- a/vendor/github.com/cockroachdb/pebble/compaction.go +++ b/vendor/github.com/cockroachdb/pebble/compaction.go @@ -1427,38 +1427,22 @@ func (c *compaction) newRangeDelIter( bytesIterated *uint64, ) (keyspan.FragmentIterator, io.Closer, error) { opts.level = l - iter, rangeDelIter, err := newIters(context.Background(), f.FileMetadata, + iterSet, err := newIters(context.Background(), f.FileMetadata, &opts, internalIterOpts{ bytesIterated: &c.bytesIterated, bufferPool: &c.bufferPool, - }) + }, iterRangeDeletions) if err != nil { return nil, nil, err - } - // TODO(peter): It is mildly wasteful to open the point iterator only to - // immediately close it. One way to solve this would be to add new - // methods to tableCache for creating point and range-deletion iterators - // independently. We'd only want to use those methods here, - // though. Doesn't seem worth the hassle in the near term. - if err = iter.Close(); err != nil { - if rangeDelIter != nil { - err = errors.CombineErrors(err, rangeDelIter.Close()) - } - return nil, nil, err - } - if rangeDelIter == nil { + } else if iterSet.rangeDeletion == nil { // The file doesn't contain any range deletions. return nil, nil, nil } - // Ensure that rangeDelIter is not closed until the compaction is // finished. This is necessary because range tombstone processing // requires the range tombstones to be held in memory for up to the // lifetime of the compaction. 
- closer := rangeDelIter - rangeDelIter = noCloseIter{rangeDelIter} - - return rangeDelIter, closer, nil + return noCloseIter{iterSet.rangeDeletion}, iterSet.rangeDeletion, nil } func (c *compaction) String() string { @@ -2029,8 +2013,8 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { // https://github.com/cockroachdb/pebble/issues/389 is // implemented if #389 creates virtual sstables as output files. d.mu.versions.obsoleteTables = append(d.mu.versions.obsoleteTables, fileInfo{ - fileNum: base.PhysicalTableDiskFileNum(f.FileNum), - fileSize: f.Size, + FileNum: base.PhysicalTableDiskFileNum(f.FileNum), + FileSize: f.Size, }) } d.mu.versions.updateObsoleteTableMetricsLocked() @@ -2083,14 +2067,6 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { s = s.next continue } - if s.efos.excised.Load() { - // If a concurrent excise has happened that overlaps with one of the key - // ranges this snapshot is interested in, this EFOS cannot transition to - // a file-only snapshot as keys in that range could now be deleted. Move - // onto the next snapshot. - s = s.next - continue - } currentVersion.Ref() // NB: s.efos.transitionToFileOnlySnapshot could close s, in which @@ -2659,8 +2635,8 @@ func (d *DB) compact1(c *compaction, errChannel chan error) (err error) { // https://github.com/cockroachdb/pebble/issues/389 is // implemented if #389 creates virtual sstables as output files. d.mu.versions.obsoleteTables = append(d.mu.versions.obsoleteTables, fileInfo{ - fileNum: base.PhysicalTableDiskFileNum(f.FileNum), - fileSize: f.Size, + FileNum: base.PhysicalTableDiskFileNum(f.FileNum), + FileSize: f.Size, }) } d.mu.versions.updateObsoleteTableMetricsLocked() @@ -3601,27 +3577,27 @@ func (d *DB) scanObsoleteFiles(list []string) { if diskFileNum >= minUnflushedLogNum { continue } - fi := fileInfo{fileNum: diskFileNum} + fi := fileInfo{FileNum: diskFileNum} if stat, err := d.opts.FS.Stat(filename); err == nil { - fi.fileSize = uint64(stat.Size()) + fi.FileSize = uint64(stat.Size()) } obsoleteLogs = append(obsoleteLogs, fi) case fileTypeManifest: if diskFileNum >= manifestFileNum { continue } - fi := fileInfo{fileNum: diskFileNum} + fi := fileInfo{FileNum: diskFileNum} if stat, err := d.opts.FS.Stat(filename); err == nil { - fi.fileSize = uint64(stat.Size()) + fi.FileSize = uint64(stat.Size()) } obsoleteManifests = append(obsoleteManifests, fi) case fileTypeOptions: if diskFileNum >= d.optionsFileNum { continue } - fi := fileInfo{fileNum: diskFileNum} + fi := fileInfo{FileNum: diskFileNum} if stat, err := d.opts.FS.Stat(filename); err == nil { - fi.fileSize = uint64(stat.Size()) + fi.FileSize = uint64(stat.Size()) } obsoleteOptions = append(obsoleteOptions, fi) case fileTypeTable: @@ -3639,10 +3615,10 @@ func (d *DB) scanObsoleteFiles(list []string) { continue } fileInfo := fileInfo{ - fileNum: obj.DiskFileNum, + FileNum: obj.DiskFileNum, } if size, err := d.objProvider.Size(obj); err == nil { - fileInfo.fileSize = uint64(size) + fileInfo.FileSize = uint64(size) } obsoleteTables = append(obsoleteTables, fileInfo) @@ -3691,10 +3667,7 @@ func (d *DB) enableFileDeletions() { d.deleteObsoleteFiles(jobID) } -type fileInfo struct { - fileNum base.DiskFileNum - fileSize uint64 -} +type fileInfo = base.FileInfo // deleteObsoleteFiles enqueues a cleanup job to the cleanup manager, if necessary. // @@ -3713,7 +3686,7 @@ func (d *DB) deleteObsoleteFiles(jobID int) { // log that has not had its contents flushed to an sstable. 
We can recycle // the prefix of d.mu.log.queue with log numbers less than // minUnflushedLogNum. - if d.mu.log.queue[i].fileNum >= d.mu.versions.minUnflushedLogNum { + if d.mu.log.queue[i].FileNum >= d.mu.versions.minUnflushedLogNum { obsoleteLogs = d.mu.log.queue[:i] d.mu.log.queue = d.mu.log.queue[i:] d.mu.versions.metrics.WAL.Files -= int64(len(obsoleteLogs)) @@ -3725,13 +3698,13 @@ func (d *DB) deleteObsoleteFiles(jobID int) { d.mu.versions.obsoleteTables = nil for _, tbl := range obsoleteTables { - delete(d.mu.versions.zombieTables, tbl.fileNum) + delete(d.mu.versions.zombieTables, tbl.FileNum) } // Sort the manifests cause we want to delete some contiguous prefix // of the older manifests. slices.SortFunc(d.mu.versions.obsoleteManifests, func(a, b fileInfo) int { - return cmp.Compare(a.fileNum, b.fileNum) + return cmp.Compare(a.FileNum, b.FileNum) }) var obsoleteManifests []fileInfo @@ -3767,25 +3740,25 @@ func (d *DB) deleteObsoleteFiles(jobID int) { // We sort to make the order of deletions deterministic, which is nice for // tests. slices.SortFunc(f.obsolete, func(a, b fileInfo) int { - return cmp.Compare(a.fileNum, b.fileNum) + return cmp.Compare(a.FileNum, b.FileNum) }) for _, fi := range f.obsolete { dir := d.dirname switch f.fileType { case fileTypeLog: - if !noRecycle && d.logRecycler.add(fi) { + if !noRecycle && d.logRecycler.Add(fi) { continue } dir = d.walDirname case fileTypeTable: - d.tableCache.evict(fi.fileNum) + d.tableCache.evict(fi.FileNum) } filesToDelete = append(filesToDelete, obsoleteFile{ dir: dir, - fileNum: fi.fileNum, + fileNum: fi.FileNum, fileType: f.fileType, - fileSize: fi.fileSize, + fileSize: fi.FileSize, }) } } @@ -3818,9 +3791,9 @@ func merge(a, b []fileInfo) []fileInfo { a = append(a, b...) slices.SortFunc(a, func(a, b fileInfo) int { - return cmp.Compare(a.fileNum, b.fileNum) + return cmp.Compare(a.FileNum, b.FileNum) }) return slices.CompactFunc(a, func(a, b fileInfo) bool { - return a.fileNum == b.fileNum + return a.FileNum == b.FileNum }) } diff --git a/vendor/github.com/cockroachdb/pebble/db.go b/vendor/github.com/cockroachdb/pebble/db.go index 0baf20dbbd..0f0219bfbe 100644 --- a/vendor/github.com/cockroachdb/pebble/db.go +++ b/vendor/github.com/cockroachdb/pebble/db.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/pebble/sstable" "github.com/cockroachdb/pebble/vfs" "github.com/cockroachdb/pebble/vfs/atomicfs" + "github.com/cockroachdb/pebble/wal" "github.com/cockroachdb/tokenbucket" "github.com/prometheus/client_golang/prometheus" ) @@ -314,7 +315,7 @@ type DB struct { // reuse. Writing to a recycled log file is faster than to a new log file on // some common filesystems (xfs, and ext3/4) due to avoiding metadata // updates. - logRecycler logRecycler + logRecycler wal.LogRecycler closed *atomic.Value closedCh chan struct{} @@ -2062,7 +2063,7 @@ func (d *DB) AsyncFlush() (<-chan struct{}, error) { // Metrics returns metrics about the database. func (d *DB) Metrics() *Metrics { metrics := &Metrics{} - recycledLogsCount, recycledLogSize := d.logRecycler.stats() + recycledLogsCount, recycledLogSize := d.logRecycler.Stats() d.mu.Lock() vers := d.mu.versions.currentVersion() @@ -2099,11 +2100,11 @@ func (d *DB) Metrics() *Metrics { // during WAL rotation. Use the larger of the two for the current WAL. All // the previous WALs's fileSizes in d.mu.log.queue are already updated. 
metrics.WAL.PhysicalSize = metrics.WAL.Size - if len(d.mu.log.queue) > 0 && metrics.WAL.PhysicalSize < d.mu.log.queue[len(d.mu.log.queue)-1].fileSize { - metrics.WAL.PhysicalSize = d.mu.log.queue[len(d.mu.log.queue)-1].fileSize + if len(d.mu.log.queue) > 0 && metrics.WAL.PhysicalSize < d.mu.log.queue[len(d.mu.log.queue)-1].FileSize { + metrics.WAL.PhysicalSize = d.mu.log.queue[len(d.mu.log.queue)-1].FileSize } for i, n := 0, len(d.mu.log.queue)-1; i < n; i++ { - metrics.WAL.PhysicalSize += d.mu.log.queue[i].fileSize + metrics.WAL.PhysicalSize += d.mu.log.queue[i].FileSize } metrics.WAL.BytesIn = d.mu.log.bytesIn // protected by d.mu @@ -2726,8 +2727,8 @@ func (d *DB) recycleWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) { // The previous log may have grown past its original physical // size. Update its file size in the queue so we have a proper // accounting of its file size. - if d.mu.log.queue[len(d.mu.log.queue)-1].fileSize < prevLogSize { - d.mu.log.queue[len(d.mu.log.queue)-1].fileSize = prevLogSize + if d.mu.log.queue[len(d.mu.log.queue)-1].FileSize < prevLogSize { + d.mu.log.queue[len(d.mu.log.queue)-1].FileSize = prevLogSize } d.mu.Unlock() @@ -2757,9 +2758,9 @@ func (d *DB) recycleWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) { var recycleOK bool var newLogFile vfs.File if err == nil { - recycleLog, recycleOK = d.logRecycler.peek() + recycleLog, recycleOK = d.logRecycler.Peek() if recycleOK { - recycleLogName := base.MakeFilepath(d.opts.FS, d.walDirname, fileTypeLog, recycleLog.fileNum) + recycleLogName := base.MakeFilepath(d.opts.FS, d.walDirname, fileTypeLog, recycleLog.FileNum) newLogFile, err = d.opts.FS.ReuseForWrite(recycleLogName, newLogName) base.MustExist(d.opts.FS, newLogName, d.opts.Logger, err) } else { @@ -2777,7 +2778,7 @@ func (d *DB) recycleWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) { // TODO(jackson): Adding a boolean to the ReuseForWrite return // value indicating whether or not the file was actually // reused would allow us to skip the stat and use - // recycleLog.fileSize. + // recycleLog.FileSize. 
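For context on the recycling flow here: recycleWAL peeks at the recycler for a reusable log, renames it into place via FS.ReuseForWrite, and pops the recycled entry only once the reuse succeeds. A toy sketch of that Peek/Pop discipline; the real wal.LogRecycler also enforces a size limit and a minimum recyclable log number, which this omits:

package main

import "fmt"

type fileInfo struct {
	FileNum  uint64
	FileSize uint64
}

// logRecycler is a toy with the Add/Peek/Pop surface the diff moves
// into the wal package.
type logRecycler struct{ logs []fileInfo }

func (r *logRecycler) Add(fi fileInfo) bool { r.logs = append(r.logs, fi); return true }

func (r *logRecycler) Peek() (fileInfo, bool) {
	if len(r.logs) == 0 {
		return fileInfo{}, false
	}
	return r.logs[0], true
}

func (r *logRecycler) Pop(fileNum uint64) error {
	if len(r.logs) == 0 || r.logs[0].FileNum != fileNum {
		return fmt.Errorf("log %d not at head of recycler", fileNum)
	}
	r.logs = r.logs[1:]
	return nil
}

func main() {
	var r logRecycler
	r.Add(fileInfo{FileNum: 12, FileSize: 1 << 20})
	if old, ok := r.Peek(); ok {
		// In recycleWAL this is where FS.ReuseForWrite renames the old
		// log file into place as the new WAL, before the entry is popped.
		fmt.Println("reusing log", old.FileNum)
		_ = r.Pop(old.FileNum)
	}
}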
var finfo os.FileInfo finfo, err = newLogFile.Stat() if err == nil { @@ -2802,14 +2803,14 @@ func (d *DB) recycleWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) { } if recycleOK { - err = firstError(err, d.logRecycler.pop(recycleLog.fileNum)) + err = firstError(err, d.logRecycler.Pop(recycleLog.FileNum)) } d.opts.EventListener.WALCreated(WALCreateInfo{ JobID: jobID, Path: newLogName, FileNum: newLogNum, - RecycledFileNum: recycleLog.fileNum, + RecycledFileNum: recycleLog.FileNum, Err: err, }) @@ -2826,7 +2827,7 @@ func (d *DB) recycleWAL() (newLogNum base.DiskFileNum, prevLogSize uint64) { panic(err) } - d.mu.log.queue = append(d.mu.log.queue, fileInfo{fileNum: newLogNum, fileSize: newLogSize}) + d.mu.log.queue = append(d.mu.log.queue, fileInfo{FileNum: newLogNum, FileSize: newLogSize}) d.mu.log.LogWriter = record.NewLogWriter(newLogFile, newLogNum, record.LogWriterConfig{ WALFsyncLatency: d.mu.log.metrics.fsyncLatency, WALMinSyncInterval: d.opts.WALMinSyncInterval, @@ -3076,42 +3077,33 @@ func (d *DB) checkVirtualBounds(m *fileMetadata) { return } - if m.HasPointKeys { - pointIter, rangeDelIter, err := d.newIters(context.TODO(), m, nil, internalIterOpts{}) - if err != nil { - panic(errors.Wrap(err, "pebble: error creating point iterator")) - } + iters, err := d.newIters(context.TODO(), m, nil, internalIterOpts{}, iterPointKeys|iterRangeDeletions|iterRangeKeys) + if err != nil { + panic(errors.Wrap(err, "pebble: error creating iterators")) + } + defer iters.CloseAll() - defer pointIter.Close() - if rangeDelIter != nil { - defer rangeDelIter.Close() - } + if m.HasPointKeys { + pointIter := iters.Point() + rangeDelIter := iters.RangeDeletion() + // Check that the lower bound is tight. pointKey, _ := pointIter.First() - var rangeDel *keyspan.Span - if rangeDelIter != nil { - rangeDel, err = rangeDelIter.First() - if err != nil { - panic(err) - } + rangeDel, err := rangeDelIter.First() + if err != nil { + panic(err) } - - // Check that the lower bound is tight. if (rangeDel == nil || d.cmp(rangeDel.SmallestKey().UserKey, m.SmallestPointKey.UserKey) != 0) && (pointKey == nil || d.cmp(pointKey.UserKey, m.SmallestPointKey.UserKey) != 0) { panic(errors.Newf("pebble: virtual sstable %s lower point key bound is not tight", m.FileNum)) } + // Check that the upper bound is tight. pointKey, _ = pointIter.Last() - rangeDel = nil - if rangeDelIter != nil { - rangeDel, err = rangeDelIter.Last() - if err != nil { - panic(err) - } + rangeDel, err = rangeDelIter.Last() + if err != nil { + panic(err) } - - // Check that the upper bound is tight. 
if (rangeDel == nil || d.cmp(rangeDel.LargestKey().UserKey, m.LargestPointKey.UserKey) != 0) && (pointKey == nil || d.cmp(pointKey.UserKey, m.LargestPointKey.UserKey) != 0) { panic(errors.Newf("pebble: virtual sstable %s upper point key bound is not tight", m.FileNum)) @@ -3123,32 +3115,24 @@ func (d *DB) checkVirtualBounds(m *fileMetadata) { panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.UserKey)) } } - - if rangeDelIter != nil { - s, err := rangeDelIter.First() - for ; s != nil; s, err = rangeDelIter.Next() { - if d.cmp(s.SmallestKey().UserKey, m.SmallestPointKey.UserKey) < 0 { - panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, s.SmallestKey().UserKey)) - } - if d.cmp(s.LargestKey().UserKey, m.LargestPointKey.UserKey) > 0 { - panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, s.LargestKey().UserKey)) - } + s, err := rangeDelIter.First() + for ; s != nil; s, err = rangeDelIter.Next() { + if d.cmp(s.SmallestKey().UserKey, m.SmallestPointKey.UserKey) < 0 { + panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, s.SmallestKey().UserKey)) } - if err != nil { - panic(err) + if d.cmp(s.LargestKey().UserKey, m.LargestPointKey.UserKey) > 0 { + panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, s.LargestKey().UserKey)) } } + if err != nil { + panic(err) + } } if !m.HasRangeKeys { return } - - rangeKeyIter, err := d.tableNewRangeKeyIter(m, keyspan.SpanIterOptions{}) - if err != nil { - panic(errors.Wrap(err, "pebble: error creating range key iterator")) - } - defer rangeKeyIter.Close() + rangeKeyIter := iters.RangeKey() // Check that the lower bound is tight. if s, err := rangeKeyIter.First(); err != nil { diff --git a/vendor/github.com/cockroachdb/pebble/external_iterator.go b/vendor/github.com/cockroachdb/pebble/external_iterator.go index 0b02e8f41d..60e08aa777 100644 --- a/vendor/github.com/cockroachdb/pebble/external_iterator.go +++ b/vendor/github.com/cockroachdb/pebble/external_iterator.go @@ -139,9 +139,8 @@ func NewExternalIterWithContext( // Add the readers to the Iterator so that Close closes them, and // SetOptions can re-construct iterators from them. externalReaders: readers, - newIters: func( - ctx context.Context, f *manifest.FileMetadata, opts *IterOptions, - internalOpts internalIterOpts) (internalIterator, keyspan.FragmentIterator, error) { + newIters: func(context.Context, *manifest.FileMetadata, *IterOptions, + internalIterOpts, iterKinds) (iterSet, error) { // NB: External iterators are currently constructed without any // `levelIters`. newIters should never be called. When we support // organizing multiple non-overlapping files into a single level diff --git a/vendor/github.com/cockroachdb/pebble/flushable.go b/vendor/github.com/cockroachdb/pebble/flushable.go index bc380746b4..eb5b6f8207 100644 --- a/vendor/github.com/cockroachdb/pebble/flushable.go +++ b/vendor/github.com/cockroachdb/pebble/flushable.go @@ -219,7 +219,7 @@ func (s *ingestedFlushable) constructRangeDelIter( ) (keyspan.FragmentIterator, error) { // Note that the keyspan level iter expects a non-nil iterator to be // returned even if there is an error. So, we return the emptyKeyspanIter. 
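The checkVirtualBounds rewrite above reflects the new newIters contract: callers pass a bitmask of requested iterator kinds and get back a single iterSet whose accessors hand out the individual iterators. A hedged sketch of that shape; the constant and accessor names follow the diff, but the struct layout and nil-handling are assumptions:

package main

import "fmt"

// iterKinds selects which iterators to construct, mirroring the
// iterPointKeys|iterRangeDeletions|iterRangeKeys request above.
type iterKinds uint8

const (
	iterPointKeys iterKinds = 1 << iota
	iterRangeDeletions
	iterRangeKeys
)

// iterator is a stand-in for the real iterator types.
type iterator struct{ name string }

func (it *iterator) Close() error { return nil }

// iterSet bundles the constructed iterators; the accessors and
// CloseAll follow the calls visible in checkVirtualBounds.
type iterSet struct {
	point, rangeDeletion, rangeKey *iterator
}

func (s *iterSet) Point() *iterator         { return s.point }
func (s *iterSet) RangeDeletion() *iterator { return s.rangeDeletion }
func (s *iterSet) RangeKey() *iterator      { return s.rangeKey }

func (s *iterSet) CloseAll() error {
	var err error
	for _, it := range []*iterator{s.point, s.rangeDeletion, s.rangeKey} {
		if it != nil {
			err = it.Close() // real code would keep the first error
		}
	}
	return err
}

func newIters(kinds iterKinds) (iterSet, error) {
	var s iterSet
	if kinds&iterPointKeys != 0 {
		s.point = &iterator{"point"}
	}
	if kinds&iterRangeDeletions != 0 {
		s.rangeDeletion = &iterator{"rangedel"}
	}
	if kinds&iterRangeKeys != 0 {
		s.rangeKey = &iterator{"rangekey"}
	}
	return s, nil
}

func main() {
	iters, _ := newIters(iterPointKeys | iterRangeKeys)
	defer iters.CloseAll()
	fmt.Println(iters.Point().name, iters.RangeDeletion() == nil, iters.RangeKey().name)
}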
- iter, rangeDelIter, err := s.newIters(context.Background(), file, nil, internalIterOpts{}) + iter, rangeDelIter, err := s.newIters.TODO(context.Background(), file, nil, internalIterOpts{}) if err != nil { return emptyKeyspanIter, err } diff --git a/vendor/github.com/cockroachdb/pebble/get_iter.go b/vendor/github.com/cockroachdb/pebble/get_iter.go index 14868e8ff4..b90fa8ca3f 100644 --- a/vendor/github.com/cockroachdb/pebble/get_iter.go +++ b/vendor/github.com/cockroachdb/pebble/get_iter.go @@ -73,6 +73,10 @@ func (g *getIter) Last() (*InternalKey, base.LazyValue) { func (g *getIter) Next() (*InternalKey, base.LazyValue) { if g.iter != nil { g.iterKey, g.iterValue = g.iter.Next() + if err := g.iter.Error(); err != nil { + g.err = err + return nil, base.LazyValue{} + } } for { @@ -134,6 +138,10 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { base.InternalKeySeqNumMax, ) g.iterKey, g.iterValue = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) + if err := g.iter.Error(); err != nil { + g.err = err + return nil, base.LazyValue{} + } g.batch = nil continue } @@ -151,6 +159,10 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { g.rangeDelIter = m.newRangeDelIter(nil) g.mem = g.mem[:n-1] g.iterKey, g.iterValue = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) + if err := g.iter.Error(); err != nil { + g.err = err + return nil, base.LazyValue{} + } continue } @@ -181,6 +193,11 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { prefix = g.key[:g.comparer.Split(g.key)] } g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) + if err := g.iter.Error(); err != nil { + g.err = err + return nil, base.LazyValue{} + } + if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey { g.iterKey = nil g.iterValue = base.LazyValue{} @@ -219,6 +236,10 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { prefix = g.key[:g.comparer.Split(g.key)] } g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) + if err := g.iter.Error(); err != nil { + g.err = err + return nil, base.LazyValue{} + } if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey { g.iterKey = nil g.iterValue = base.LazyValue{} diff --git a/vendor/github.com/cockroachdb/pebble/ingest.go b/vendor/github.com/cockroachdb/pebble/ingest.go index 05248d5158..cd7849800d 100644 --- a/vendor/github.com/cockroachdb/pebble/ingest.go +++ b/vendor/github.com/cockroachdb/pebble/ingest.go @@ -1392,6 +1392,52 @@ func (d *DB) ingest( d.mu.Lock() defer d.mu.Unlock() + // Check if any of the currently-open EventuallyFileOnlySnapshots overlap + // in key ranges with the excise span. If so, we need to check for memtable + // overlaps with all bounds of that EventuallyFileOnlySnapshot in addition + // to the ingestion's own bounds too. + + if exciseSpan.Valid() { + for s := d.mu.snapshots.root.next; s != &d.mu.snapshots.root; s = s.next { + if s.efos == nil { + continue + } + if base.Visible(seqNum, s.efos.seqNum, base.InternalKeySeqNumMax) { + // We only worry about snapshots older than the excise. Any snapshots + // created after the excise should see the excised view of the LSM + // anyway. + // + // Since we delay publishing the excise seqnum as visible until after + // the apply step, this case will never be hit in practice until we + // make excises flushable ingests. 
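The base.Visible check in the EFOS loop above is what classifies a snapshot as older or newer than the excise. A simplified sketch of the rule under the usual LSM convention that a snapshot sees only strictly older sequence numbers; the real helper also takes base.InternalKeySeqNumMax to handle batch sequence numbers, which is ignored here:

package main

import "fmt"

// visible reports whether a write at seqNum is readable by a snapshot
// taken at snapshotSeqNum: only strictly older writes are visible.
func visible(seqNum, snapshotSeqNum uint64) bool {
	return seqNum < snapshotSeqNum
}

func main() {
	exciseSeqNum := uint64(100)
	// Snapshot at 90 predates the excise: it must keep seeing the
	// pre-excise state, so its protected ranges join overlapBounds.
	fmt.Println(visible(exciseSeqNum, 90)) // false
	// Snapshot at 120 was created after the excise and should simply
	// observe the excised view of the LSM, so it is skipped.
	fmt.Println(visible(exciseSeqNum, 120)) // true
}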
+ continue + } + if invariants.Enabled { + if s.efos.hasTransitioned() { + panic("unexpected transitioned EFOS in snapshots list") + } + } + for i := range s.efos.protectedRanges { + if !s.efos.protectedRanges[i].OverlapsKeyRange(d.cmp, exciseSpan) { + continue + } + // Our excise conflicts with this EFOS. We need to add its protected + // ranges to our overlapBounds. Grow overlapBounds in one allocation + // if necesary. + prs := s.efos.protectedRanges + if cap(overlapBounds) < len(overlapBounds)+len(prs) { + oldOverlapBounds := overlapBounds + overlapBounds = make([]bounded, len(oldOverlapBounds), len(oldOverlapBounds)+len(prs)) + copy(overlapBounds, oldOverlapBounds) + } + for i := range prs { + overlapBounds = append(overlapBounds, &prs[i]) + } + break + } + } + } + // Check to see if any files overlap with any of the memtables. The queue // is ordered from oldest to newest with the mutable memtable being the // last element in the slice. We want to wait for the newest table that @@ -1419,7 +1465,8 @@ func (d *DB) ingest( // flushable queue and switching to a new memtable. metaFlushableOverlaps[v.FileNum] = true case *KeyRange: - // An excise span; not a file. + // An excise span or an EventuallyFileOnlySnapshot protected range; + // not a file. default: panic("unreachable") } @@ -1491,6 +1538,13 @@ func (d *DB) ingest( return } + // If there's an excise being done atomically with the same ingest, we + // assign the lowest sequence number in the set of sequence numbers for this + // ingestion to the excise. Note that we've already allocated fileCount+1 + // sequence numbers in this case. + if exciseSpan.Valid() { + seqNum++ // the first seqNum is reserved for the excise. + } // Update the sequence numbers for all ingested sstables' // metadata. When the version edit is applied, the metadata is // written to the manifest, persisting the sequence number. @@ -1524,8 +1578,12 @@ func (d *DB) ingest( // the commit mutex which would prevent unrelated batches from writing their // changes to the WAL and memtable. This will cause a bigger commit hiccup // during ingestion. + seqNumCount := loadResult.fileCount + if exciseSpan.Valid() { + seqNumCount++ + } d.commit.ingestSem <- struct{}{} - d.commit.AllocateSeqNum(loadResult.fileCount, prepare, apply) + d.commit.AllocateSeqNum(seqNumCount, prepare, apply) <-d.commit.ingestSem if err != nil { @@ -1663,7 +1721,7 @@ func (d *DB) excise( // This file will contain point keys smallestPointKey := m.SmallestPointKey var err error - iter, rangeDelIter, err = d.newIters(context.TODO(), m, &IterOptions{ + iter, rangeDelIter, err = d.newIters.TODO(context.TODO(), m, &IterOptions{ CategoryAndQoS: sstable.CategoryAndQoS{ Category: "pebble-ingest", QoSLevel: sstable.LatencySensitiveQoSLevel, @@ -1781,7 +1839,7 @@ func (d *DB) excise( largestPointKey := m.LargestPointKey var err error if iter == nil && rangeDelIter == nil { - iter, rangeDelIter, err = d.newIters(context.TODO(), m, &IterOptions{ + iter, rangeDelIter, err = d.newIters.TODO(context.TODO(), m, &IterOptions{ CategoryAndQoS: sstable.CategoryAndQoS{ Category: "pebble-ingest", QoSLevel: sstable.LatencySensitiveQoSLevel, @@ -2253,7 +2311,10 @@ func (d *DB) ingestApply( } } // Check for any EventuallyFileOnlySnapshots that could be watching for - // an excise on this span. + // an excise on this span. There should be none as the + // computePossibleOverlaps steps should have forced these EFOS to transition + // to file-only snapshots by now. If we see any that conflict with this + // excise, panic. 
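The sequence-number accounting above can be made concrete: when an excise rides along with the ingest, fileCount+1 seqnums are allocated and the lowest is reserved for the excise, so the sstables start one higher. A small worked sketch (assumed helper, not a pebble API):

package main

import "fmt"

// assignSeqNums mirrors the accounting in the hunks above: for
// fileCount sstables plus an optional excise, fileCount+1 seqnums are
// allocated and the lowest one is reserved for the excise.
func assignSeqNums(firstSeqNum uint64, fileCount int, hasExcise bool) (exciseSeqNum uint64, fileSeqNums []uint64) {
	next := firstSeqNum
	if hasExcise {
		exciseSeqNum = next
		next++ // the first seqnum is reserved for the excise
	}
	for i := 0; i < fileCount; i++ {
		fileSeqNums = append(fileSeqNums, next)
		next++
	}
	return exciseSeqNum, fileSeqNums
}

func main() {
	excise, files := assignSeqNums(100, 3, true)
	fmt.Println(excise, files) // 100 [101 102 103]
}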
if exciseSpan.Valid() { for s := d.mu.snapshots.root.next; s != &d.mu.snapshots.root; s = s.next { if s.efos == nil { @@ -2266,8 +2327,7 @@ func (d *DB) ingestApply( // snapshot. for i := range efos.protectedRanges { if efos.protectedRanges[i].OverlapsKeyRange(d.cmp, exciseSpan) { - efos.excised.Store(true) - break + panic("unexpected excise of an EventuallyFileOnlySnapshot's bounds") } } } diff --git a/vendor/github.com/cockroachdb/pebble/internal/base/close_helper.go b/vendor/github.com/cockroachdb/pebble/internal/base/close_helper.go new file mode 100644 index 0000000000..f9da4f199e --- /dev/null +++ b/vendor/github.com/cockroachdb/pebble/internal/base/close_helper.go @@ -0,0 +1,30 @@ +// Copyright 2024 The LevelDB-Go and Pebble Authors. All rights reserved. Use +// of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. + +package base + +import "io" + +// CloseHelper wraps an io.Closer in a wrapper that ignores extra calls to +// Close. It is useful to ensure cleanup in error paths (using defer) without +// double-closing. +func CloseHelper(closer io.Closer) io.Closer { + return &closeHelper{ + Closer: closer, + } +} + +type closeHelper struct { + Closer io.Closer +} + +// Close the underlying Closer, unless it was already closed. +func (h *closeHelper) Close() error { + closer := h.Closer + if closer == nil { + return nil + } + h.Closer = nil + return closer.Close() +} diff --git a/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go b/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go index 97656ce565..24cc26c583 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go +++ b/vendor/github.com/cockroachdb/pebble/internal/base/filenames.go @@ -190,3 +190,9 @@ func MustExist(fs vfs.FS, filename string, fataler Fataler, err error) { fataler.Fatalf("%s:\n%s\ndirectory contains %d files, %d unknown, %d tables, %d logs, %d manifests", fs.PathBase(filename), err, total, unknown, tables, logs, manifests) } + +// FileInfo provides some rudimentary information about a file. +type FileInfo struct { + FileNum DiskFileNum + FileSize uint64 +} diff --git a/vendor/github.com/cockroachdb/pebble/internal/base/iterator.go b/vendor/github.com/cockroachdb/pebble/internal/base/iterator.go index 1b72432f68..39e05e1fc7 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/base/iterator.go +++ b/vendor/github.com/cockroachdb/pebble/internal/base/iterator.go @@ -191,8 +191,10 @@ type InternalIterator interface { // Close closes the iterator and returns any accumulated error. Exhausting // all the key/value pairs in a table is not considered to be an error. - // It is valid to call Close multiple times. Other methods should not be - // called after the iterator has been closed. + // + // Once Close is called, the iterator should not be used again. Specific + // implementations may support multiple calls to Close (but no other calls + // after the first Close). Close() error // SetBounds sets the lower and upper bounds for the iterator. 
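base.CloseHelper, added above, makes the second and later Close calls no-ops, so a deferred Close on an error path cannot double-close a resource that was also closed explicitly on the success path. A usage sketch:

package main

import (
	"fmt"
	"io"
)

type reader struct{ closed int }

func (r *reader) Close() error { r.closed++; return nil }

// closeHelper mirrors base.CloseHelper above: the first Close forwards
// to the wrapped Closer and nils it out; later calls return nil.
type closeHelper struct{ io.Closer }

func (h *closeHelper) Close() error {
	c := h.Closer
	if c == nil {
		return nil
	}
	h.Closer = nil
	return c.Close()
}

func main() {
	r := &reader{}
	c := &closeHelper{Closer: r}
	defer c.Close() // harmless: the explicit Close below already ran
	_ = c.Close()
	fmt.Println(r.closed) // 1
}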
Note that the diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go index 0f33bda848..5c55cb8a61 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/interleaving_iter.go @@ -207,11 +207,14 @@ func (i *InterleavingIter) Init( keyspanIter FragmentIterator, opts InterleavingIterOpts, ) { + keyspanIter = MaybeAssert(keyspanIter, comparer.Compare) + // To debug: + // keyspanIter = InjectLogging(keyspanIter, base.DefaultLogger) *i = InterleavingIter{ cmp: comparer.Compare, comparer: comparer, pointIter: pointIter, - keyspanIter: MaybeAssert(keyspanIter, comparer.Compare), + keyspanIter: keyspanIter, mask: opts.Mask, lower: opts.LowerBound, upper: opts.UpperBound, diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/logging_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/logging_iter.go index 074f4f97f5..0536f5780c 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/logging_iter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/logging_iter.go @@ -37,7 +37,7 @@ func newLoggingIter(state *loggingState, iter FragmentIterator) FragmentIterator return &loggingIter{ iter: iter, state: state, - context: fmt.Sprintf("%T:", iter), + context: fmt.Sprintf("%T(%p):", iter, iter), } } diff --git a/vendor/github.com/cockroachdb/pebble/internal/keyspan/merging_iter.go b/vendor/github.com/cockroachdb/pebble/internal/keyspan/merging_iter.go index 5d2ee53afc..45945d7c9c 100644 --- a/vendor/github.com/cockroachdb/pebble/internal/keyspan/merging_iter.go +++ b/vendor/github.com/cockroachdb/pebble/internal/keyspan/merging_iter.go @@ -184,7 +184,6 @@ type MergingIter struct { // destination for transforms. Every tranformed span overwrites the // previous. span Span - err error dir int8 // alloc preallocates mergingIterLevel and mergingIterItems for use by the @@ -675,7 +674,7 @@ func (m *MergingIter) Prev() (*Span, error) { // Close closes the iterator, releasing all acquired resources. func (m *MergingIter) Close() error { - err := m.err + var err error for i := range m.levels { err = firstError(err, m.levels[i].iter.Close()) } @@ -834,7 +833,7 @@ func (m *MergingIter) findNextFragmentSet() (*Span, error) { // below loop will still consider [b,d) before continuing to [d, e)). It // returns when it finds a span that is covered by at least one key. - for m.heap.len() > 0 && m.err == nil { + for m.heap.len() > 0 { // Initialize the next span's start bound. SeekGE and First prepare the // heap without advancing. Next leaves the heap in a state such that the // root is the smallest bound key equal to the returned span's end key, @@ -876,12 +875,12 @@ func (m *MergingIter) findNextFragmentSet() (*Span, error) { if err := m.nextEntry(); err != nil { return nil, err } - for len(m.heap.items) > 0 && m.err == nil && m.cmp(m.heapRoot(), m.start) == 0 { + for len(m.heap.items) > 0 && m.cmp(m.heapRoot(), m.start) == 0 { if err := m.nextEntry(); err != nil { return nil, err } } - if len(m.heap.items) == 0 || m.err != nil { + if len(m.heap.items) == 0 { break } @@ -917,7 +916,7 @@ func (m *MergingIter) findPrevFragmentSet() (*Span, error) { // below loop will still consider [b,d) before continuing to [a, b)). It // returns when it finds a span that is covered by at least one key. 
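The MergingIter changes above drop the cached m.err field in favor of returning errors from each positioning call; callers iterate until the span is nil and only then inspect the error, as the rewritten loops in db.go do. The idiom, against a pared-down iterator interface:

package main

import "fmt"

type span struct{ start, end string }

// fragmentIter is a minimal stand-in for keyspan.FragmentIterator
// after the refactor: a nil span with a nil error means exhaustion,
// while a nil span with a non-nil error means failure.
type fragmentIter interface {
	First() (*span, error)
	Next() (*span, error)
}

func drain(it fragmentIter) error {
	s, err := it.First()
	for ; s != nil; s, err = it.Next() {
		fmt.Println("span", s.start, s.end)
	}
	// Only after the loop do we learn whether iteration stopped because
	// the iterator was exhausted or because it hit an error.
	return err
}

type sliceIter struct {
	spans []span
	i     int
}

func (s *sliceIter) First() (*span, error) { s.i = 0; return s.Next() }

func (s *sliceIter) Next() (*span, error) {
	if s.i >= len(s.spans) {
		return nil, nil
	}
	sp := &s.spans[s.i]
	s.i++
	return sp, nil
}

func main() {
	_ = drain(&sliceIter{spans: []span{{"a", "c"}, {"c", "e"}}})
}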
- for m.heap.len() > 0 && m.err == nil { + for m.heap.len() > 0 { // Initialize the next span's end bound. SeekLT and Last prepare the // heap without advancing. Prev leaves the heap in a state such that the // root is the largest bound key equal to the returned span's start key, @@ -955,13 +954,15 @@ func (m *MergingIter) findPrevFragmentSet() (*Span, error) { // L2: [c, e) // If we're positioned at L1's start(c) start boundary, we want to prev // to move to the first bound < c. - m.err = m.prevEntry() - for len(m.heap.items) > 0 && m.err == nil && m.cmp(m.heapRoot(), m.end) == 0 { - m.err = m.prevEntry() + if err := m.prevEntry(); err != nil { + return nil, err + } + for len(m.heap.items) > 0 && m.cmp(m.heapRoot(), m.end) == 0 { + if err := m.prevEntry(); err != nil { + return nil, err + } } - if m.err != nil { - return nil, m.err - } else if len(m.heap.items) == 0 { + if len(m.heap.items) == 0 { break } diff --git a/vendor/github.com/cockroachdb/pebble/iterator.go b/vendor/github.com/cockroachdb/pebble/iterator.go index 11a92f38b4..b1e8c04cbf 100644 --- a/vendor/github.com/cockroachdb/pebble/iterator.go +++ b/vendor/github.com/cockroachdb/pebble/iterator.go @@ -638,6 +638,12 @@ func (i *Iterator) findNextEntry(limit []byte) { return } } + + // Is iterKey nil due to an error? + if err := i.iter.Error(); err != nil { + i.err = err + i.iterValidityState = IterExhausted + } } func (i *Iterator) nextPointCurrentUserKey() bool { @@ -652,7 +658,15 @@ func (i *Iterator) nextPointCurrentUserKey() bool { i.iterKey, i.iterValue = i.iter.Next() i.stats.ForwardStepCount[InternalIterCall]++ - if i.iterKey == nil || !i.equal(i.key, i.iterKey.UserKey) { + if i.iterKey == nil { + if err := i.iter.Error(); err != nil { + i.err = err + } else { + i.pos = iterPosNext + } + return false + } + if !i.equal(i.key, i.iterKey.UserKey) { i.pos = iterPosNext return false } @@ -742,8 +756,14 @@ func (i *Iterator) nextUserKey() { i.key = i.keyBuf } for { - i.iterKey, i.iterValue = i.iter.Next() i.stats.ForwardStepCount[InternalIterCall]++ + i.iterKey, i.iterValue = i.iter.Next() + if i.iterKey == nil { + if err := i.iter.Error(); err != nil { + i.err = err + return + } + } // NB: We're guaranteed to be on the next user key if the previous key // had a zero sequence number (`done`), or the new key has a trailer // greater or equal to the previous key's trailer. This is true because @@ -952,6 +972,13 @@ func (i *Iterator) findPrevEntry(limit []byte) { // the key prefix. i.stats.ReverseStepCount[InternalIterCall]++ i.iterKey, i.iterValue = i.iter.Prev() + if i.iterKey == nil { + if err := i.iter.Error(); err != nil { + i.err = err + i.iterValidityState = IterExhausted + return + } + } if limit != nil && i.iterKey != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { i.iterValidityState = IterAtLimit i.pos = iterPosCurReversePaused @@ -992,8 +1019,8 @@ func (i *Iterator) findPrevEntry(limit []byte) { i.value = LazyValue{} i.iterValidityState = IterExhausted valueMerger = nil - i.iterKey, i.iterValue = i.iter.Prev() i.stats.ReverseStepCount[InternalIterCall]++ + i.iterKey, i.iterValue = i.iter.Prev() // Compare with the limit. We could optimize by only checking when // we step to the previous user key, but detecting that requires a // comparison too. 
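The repeated pattern threaded through iterator.go above: a nil iterKey no longer implies clean exhaustion, so each step consults iter.Error() before deciding. A minimal sketch of the disambiguation:

package main

import (
	"errors"
	"fmt"
)

// iter is a minimal stand-in for an internal iterator whose Next can
// return a nil key either at exhaustion or after an error; Error()
// disambiguates, as in the checks added above.
type iter struct {
	keys []string
	i    int
	err  error
}

func (it *iter) Next() *string {
	if it.err != nil || it.i >= len(it.keys) {
		return nil
	}
	k := &it.keys[it.i]
	it.i++
	return k
}

func (it *iter) Error() error { return it.err }

func advance(it *iter) (*string, error) {
	k := it.Next()
	if k == nil {
		if err := it.Error(); err != nil {
			return nil, err // nil key caused by a real failure
		}
	}
	return k, nil // k may be nil here, meaning clean exhaustion
}

func main() {
	it := &iter{err: errors.New("checksum mismatch")}
	if _, err := advance(it); err != nil {
		fmt.Println("stop:", err)
	}
}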
Note that this position may already passed a @@ -1051,12 +1078,14 @@ func (i *Iterator) findPrevEntry(limit []byte) { i.lazyValueBuf = value[:0] } if i.err != nil { + i.iterValidityState = IterExhausted return } valueMerger, i.err = i.merge(i.key, value) var iterValue []byte iterValue, _, i.err = i.iterValue.Value(nil) if i.err != nil { + i.iterValidityState = IterExhausted return } if i.err == nil { @@ -1070,6 +1099,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { var iterValue []byte iterValue, _, i.err = i.iterValue.Value(nil) if i.err != nil { + i.iterValidityState = IterExhausted return } i.err = valueMerger.MergeNewer(iterValue) @@ -1129,6 +1159,10 @@ func (i *Iterator) prevUserKey() { i.iterKey, i.iterValue = i.iter.Prev() i.stats.ReverseStepCount[InternalIterCall]++ if i.iterKey == nil { + if err := i.iter.Error(); err != nil { + i.err = err + i.iterValidityState = IterExhausted + } break } if !i.equal(i.key, i.iterKey.UserKey) { @@ -1327,6 +1361,11 @@ func (i *Iterator) SeekGEWithLimit(key []byte, limit []byte) IterValidityState { if seekInternalIter { i.iterKey, i.iterValue = i.iter.SeekGE(key, flags) i.stats.ForwardSeekCount[InternalIterCall]++ + if err := i.iter.Error(); err != nil { + i.err = err + i.iterValidityState = IterExhausted + return i.iterValidityState + } } i.findNextEntry(limit) i.maybeSampleRead() @@ -1581,6 +1620,11 @@ func (i *Iterator) SeekLTWithLimit(key []byte, limit []byte) IterValidityState { if seekInternalIter { i.iterKey, i.iterValue = i.iter.SeekLT(key, base.SeekLTFlagsNone) i.stats.ReverseSeekCount[InternalIterCall]++ + if err := i.iter.Error(); err != nil { + i.err = err + i.iterValidityState = IterExhausted + return i.iterValidityState + } } i.findPrevEntry(limit) i.maybeSampleRead() @@ -1619,6 +1663,10 @@ func (i *Iterator) First() bool { i.stats.ForwardSeekCount[InterfaceCall]++ i.iterFirstWithinBounds() + if i.err != nil { + i.iterValidityState = IterExhausted + return false + } i.findNextEntry(nil) i.maybeSampleRead() return i.iterValidityState == IterValid @@ -1651,6 +1699,10 @@ func (i *Iterator) Last() bool { i.stats.ReverseSeekCount[InterfaceCall]++ i.iterLastWithinBounds() + if i.err != nil { + i.iterValidityState = IterExhausted + return false + } i.findPrevEntry(nil) i.maybeSampleRead() return i.iterValidityState == IterValid @@ -2013,9 +2065,15 @@ func (i *Iterator) PrevWithLimit(limit []byte) IterValidityState { i.iterLastWithinBounds() } else { i.prevUserKey() + if i.err != nil { + return i.iterValidityState + } } if stepAgain { i.prevUserKey() + if i.err != nil { + return i.iterValidityState + } } } i.findPrevEntry(limit) @@ -2209,6 +2267,7 @@ func (i *Iterator) ValueAndErr() ([]byte, error) { val, callerOwned, err := i.value.Value(i.lazyValueBuf) if err != nil { i.err = err + i.iterValidityState = IterExhausted } if callerOwned { i.lazyValueBuf = val[:0] @@ -2238,7 +2297,7 @@ func (i *Iterator) Valid() bool { valid := i.iterValidityState == IterValid && !i.requiresReposition if invariants.Enabled { if err := i.Error(); valid && err != nil { - panic(errors.WithSecondaryError(errors.AssertionFailedf("pebble: iterator is valid with non-nil Error"), err)) + panic(errors.AssertionFailedf("pebble: iterator is valid with non-nil Error: %+v", err)) } } return valid @@ -2246,10 +2305,13 @@ func (i *Iterator) Valid() bool { // Error returns any accumulated error. 
func (i *Iterator) Error() error { + if i.err != nil { + return i.err + } if i.iter != nil { - return firstError(i.err, i.iter.Error()) + return i.iter.Error() } - return i.err + return nil } const maxKeyBufCacheSize = 4 << 10 // 4 KB diff --git a/vendor/github.com/cockroachdb/pebble/level_checker.go b/vendor/github.com/cockroachdb/pebble/level_checker.go index 7da59c9e81..96488f3220 100644 --- a/vendor/github.com/cockroachdb/pebble/level_checker.go +++ b/vendor/github.com/cockroachdb/pebble/level_checker.go @@ -202,24 +202,17 @@ func (m *simpleMergingIter) step() bool { return false } // Is this point covered by a tombstone at a lower level? Note that all these - // iterators must be positioned at a key > item.key. So the Largest key bound - // of the sstable containing the tombstone >= item.key. So the upper limit of - // the tombstone cannot be file-bounds-constrained to < item.key. But it is - // possible that item.key < smallest key bound of the sstable, in which case - // this tombstone should be ignored. + // iterators must be positioned at a key > item.key. for level := item.index + 1; level < len(m.levels); level++ { lvl := &m.levels[level] if lvl.rangeDelIter == nil || lvl.tombstone.Empty() { continue } - if (lvl.smallestUserKey == nil || m.heap.cmp(lvl.smallestUserKey, item.key.UserKey) <= 0) && - lvl.tombstone.Contains(m.heap.cmp, item.key.UserKey) { - if lvl.tombstone.CoversAt(m.snapshot, item.key.SeqNum()) { - m.err = errors.Errorf("tombstone %s in %s deletes key %s in %s", - lvl.tombstone.Pretty(m.formatKey), lvl.iter, item.key.Pretty(m.formatKey), - l.iter) - return false - } + if lvl.tombstone.Contains(m.heap.cmp, item.key.UserKey) && lvl.tombstone.CoversAt(m.snapshot, item.key.SeqNum()) { + m.err = errors.Errorf("tombstone %s in %s deletes key %s in %s", + lvl.tombstone.Pretty(m.formatKey), lvl.iter, item.key.Pretty(m.formatKey), + l.iter) + return false } } } @@ -385,7 +378,7 @@ func checkRangeTombstones(c *checkConfig) error { for f := files.First(); f != nil; f = files.Next() { lf := files.Take() //lower, upper := manifest.KeyRange(c.cmp, lf.Iter()) - iterToClose, iter, err := c.newIters( + iterToClose, iter, err := c.newIters.TODO( context.Background(), lf.FileMetadata, &IterOptions{level: manifest.Level(lsmLevel)}, internalIterOpts{}) if err != nil { return err diff --git a/vendor/github.com/cockroachdb/pebble/level_iter.go b/vendor/github.com/cockroachdb/pebble/level_iter.go index 000ca483ae..3cefa66ae1 100644 --- a/vendor/github.com/cockroachdb/pebble/level_iter.go +++ b/vendor/github.com/cockroachdb/pebble/level_iter.go @@ -16,38 +16,6 @@ import ( "github.com/cockroachdb/pebble/sstable" ) -// tableNewIters creates a new point and range-del iterator for the given file -// number. -// -// On success, the internalIterator is not-nil and must be closed; the -// FragmentIterator can be nil. -// TODO(radu): always return a non-nil FragmentIterator. -// -// On error, the iterators are nil. -// -// The only (non-test) implementation of tableNewIters is tableCacheContainer.newIters(). -type tableNewIters func( - ctx context.Context, - file *manifest.FileMetadata, - opts *IterOptions, - internalOpts internalIterOpts, -) (internalIterator, keyspan.FragmentIterator, error) - -// tableNewRangeDelIter takes a tableNewIters and returns a TableNewSpanIter -// for the rangedel iterator returned by tableNewIters. 
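Call sites in this diff now read d.newIters.TODO(...), which suggests tableNewIters became a defined function type whose primary signature returns an iterSet, with a TODO method adapting it to the legacy (point, rangeDel, error) shape during the migration. The method name comes from the diff; the definition below is a guess at its behavior, not the actual pebble code:

package main

import "fmt"

type iterSet struct{ point, rangeDel string }

// newItersFn plays the role of tableNewIters after the refactor: Go
// permits methods on a defined function type, so the adapter can live
// directly on the constructor itself.
type newItersFn func(kinds uint8) (iterSet, error)

// TODO adapts the combined iterSet result to the old three-value
// signature so call sites can migrate incrementally (assumed body).
func (f newItersFn) TODO() (string, string, error) {
	set, err := f(0b11)
	if err != nil {
		return "", "", err
	}
	return set.point, set.rangeDel, nil
}

func main() {
	var newIters newItersFn = func(uint8) (iterSet, error) {
		return iterSet{point: "point-iter", rangeDel: "rangedel-iter"}, nil
	}
	p, rd, err := newIters.TODO()
	fmt.Println(p, rd, err)
}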
-func tableNewRangeDelIter(ctx context.Context, newIters tableNewIters) keyspan.TableNewSpanIter { - return func(file *manifest.FileMetadata, iterOptions keyspan.SpanIterOptions) (keyspan.FragmentIterator, error) { - iter, rangeDelIter, err := newIters(ctx, file, nil, internalIterOpts{}) - if iter != nil { - _ = iter.Close() - } - if rangeDelIter == nil { - rangeDelIter = emptyKeyspanIter - } - return rangeDelIter, err - } -} - type internalIterOpts struct { bytesIterated *uint64 bufferPool *sstable.BufferPool @@ -676,7 +644,7 @@ func (l *levelIter) loadFile(file *fileMetadata, dir int) loadFileReturnIndicato var rangeDelIter keyspan.FragmentIterator var iter internalIterator - iter, rangeDelIter, l.err = l.newIters(l.ctx, l.iterFile, &l.tableOpts, l.internalOpts) + iter, rangeDelIter, l.err = l.newIters.TODO(l.ctx, l.iterFile, &l.tableOpts, l.internalOpts) l.iter = iter if l.err != nil { return noFileLoaded @@ -696,11 +664,6 @@ func (l *levelIter) loadFile(file *fileMetadata, dir int) loadFileReturnIndicato } else if rangeDelIter != nil { rangeDelIter.Close() } - if l.boundaryContext != nil { - l.boundaryContext.smallestUserKey = file.Smallest.UserKey - l.boundaryContext.largestUserKey = file.Largest.UserKey - l.boundaryContext.isLargestUserKeyExclusive = file.Largest.IsExclusiveSentinel() - } return newFileLoaded } } diff --git a/vendor/github.com/cockroachdb/pebble/merging_iter.go b/vendor/github.com/cockroachdb/pebble/merging_iter.go index ce08647d88..d1028eb95b 100644 --- a/vendor/github.com/cockroachdb/pebble/merging_iter.go +++ b/vendor/github.com/cockroachdb/pebble/merging_iter.go @@ -48,14 +48,6 @@ type mergingIterLevel struct { } type levelIterBoundaryContext struct { - // smallestUserKey and largestUserKey are populated with the smallest and - // largest boundaries of the current file. - smallestUserKey, largestUserKey []byte - // isLargestUserKeyExclusive is set to true when a file's largest boundary - // is an exclusive key, (eg, a range deletion sentinel). If true, the file - // does not contain any keys with the provided user key, and the - // largestUserKey bound is exclusive. - isLargestUserKeyExclusive bool // isSyntheticIterBoundsKey is set to true iff the key returned by the level // iterator is a synthetic key derived from the iterator bounds. This is used // to prevent the mergingIter from being stuck at such a synthetic key if it @@ -737,39 +729,13 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { continue } - // Reasoning for correctness of untruncated tombstone handling when the untruncated - // tombstone is at a higher level: - // The iterator corresponding to this tombstone is still in the heap so it must be - // positioned >= item.iterKey. Which means the Largest key bound of the sstable containing this - // tombstone is >= item.iterKey. So the upper limit of this tombstone cannot be file-bounds-constrained - // to < item.iterKey. But it is possible that item.key < smallestUserKey, in which - // case this tombstone should be ignored. - // - // Example 1: - // sstable bounds [c#8, g#12] containing a tombstone [b, i)#7, and key is c#6. The - // smallestUserKey is c, so we know the key is within the file bounds and the tombstone - // [b, i) covers it. - // - // Example 2: - // Same sstable bounds but key is b#10. The smallestUserKey is c, so the tombstone [b, i) - // does not cover this key. - // - // For a tombstone at the same level as the key, the file bounds are trivially satisfied. 
- if (l.smallestUserKey == nil || m.heap.cmp(l.smallestUserKey, item.iterKey.UserKey) <= 0) && - l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) { + if l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) { if level < item.index { // We could also do m.seekGE(..., level + 1). The levels from // [level + 1, item.index) are already after item.iterKey so seeking them may be // wasteful. - // We can seek up to the min of largestUserKey and tombstone.End. - // - // Using example 1 above, we can seek to the smaller of g and i, which is g. - // - // Another example, where the sstable bounds are [c#8, i#InternalRangeDelSentinel], - // and the tombstone is [b, i)#8. Seeking to i is correct since it is seeking up to - // the exclusive bound of the tombstone. We do not need to look at - // isLargestKeyRangeDelSentinel. + // We can seek up to tombstone.End. // // Progress argument: Since this file is at a higher level than item.iterKey we know // that the iterator in this file must be positioned within its bounds and at a key @@ -781,9 +747,6 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { // seekKey, computed below, is > item.iterKey.UserKey, so the call to seekGE() will // make forward progress. seekKey := l.tombstone.End - if l.largestUserKey != nil && m.heap.cmp(l.largestUserKey, seekKey) < 0 { - seekKey = l.largestUserKey - } // This seek is not directly due to a SeekGE call, so we don't know // enough about the underlying iterator positions, and so we keep the // try-seek-using-next optimization disabled. Additionally, if we're in @@ -974,49 +937,13 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { if l.tombstone == nil { continue } - - // Reasoning for correctness of untruncated tombstone handling when the untruncated - // tombstone is at a higher level: - // - // The iterator corresponding to this tombstone is still in the heap so it must be - // positioned <= item.iterKey. Which means the Smallest key bound of the sstable containing this - // tombstone is <= item.iterKey. So the lower limit of this tombstone cannot have been - // file-bounds-constrained to > item.iterKey. But it is possible that item.key >= Largest - // key bound of this sstable, in which case this tombstone should be ignored. - // - // Example 1: - // sstable bounds [c#8, g#12] containing a tombstone [b, i)#7, and key is f#6. The - // largestUserKey is g, so we know the key is within the file bounds and the tombstone - // [b, i) covers it. - // - // Example 2: - // Same sstable but the key is g#6. This cannot happen since the [b, i)#7 untruncated - // tombstone was involved in a compaction which must have had a file to the right of this - // sstable that is part of the same atomic compaction group for future compactions. That - // file must have bounds that cover g#6 and this levelIter must be at that file. - // - // Example 3: - // sstable bounds [c#8, g#RangeDelSentinel] containing [b, i)#7 and the key is g#10. - // This key is not deleted by this tombstone. We need to look at - // isLargestUserKeyExclusive. - // - // For a tombstone at the same level as the key, the file bounds are trivially satisfied. - - // Default to within bounds. 
- withinLargestSSTableBound := true - if l.largestUserKey != nil { - cmpResult := m.heap.cmp(l.largestUserKey, item.iterKey.UserKey) - withinLargestSSTableBound = cmpResult > 0 || (cmpResult == 0 && !l.isLargestUserKeyExclusive) - } - if withinLargestSSTableBound && l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) && l.tombstone.VisibleAt(m.snapshot) { + if l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) && l.tombstone.VisibleAt(m.snapshot) { if level < item.index { // We could also do m.seekLT(..., level + 1). The levels from // [level + 1, item.index) are already before item.iterKey so seeking them may be // wasteful. - // We can seek up to the max of smallestUserKey and tombstone.Start.UserKey. - // - // Using example 1 above, we can seek to the larger of c and b, which is c. + // We can seek up to tombstone.Start.UserKey. // // Progress argument: We know that the iterator in this file is positioned within // its bounds and at a key X < item.iterKey (otherwise it would be the max of the heap). @@ -1025,9 +952,6 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { // is <= item.iterKey.UserKey, and since we do a seekLT() we will make backwards // progress. seekKey := l.tombstone.Start - if l.smallestUserKey != nil && m.heap.cmp(l.smallestUserKey, seekKey) > 0 { - seekKey = l.smallestUserKey - } // We set the relative-seek flag. This is important when // iterating with lazy combined iteration. If there's a range // key between this level's current file and the file the seek @@ -1159,42 +1083,21 @@ func (m *mergingIter) seekGE(key []byte, level int, flags base.SeekGEFlags) erro (m.combinedIterState == nil || m.combinedIterState.initialized) { // The level has a range-del iterator. Find the tombstone containing // the search key. - // - // For untruncated tombstones that are possibly file-bounds-constrained, we are using a - // levelIter which will set smallestUserKey and largestUserKey. Since the levelIter - // is at this file we know that largestUserKey >= key, so we know that the - // tombstone we find cannot be file-bounds-constrained in its upper bound to something < key. - // We do need to compare with smallestUserKey to ensure that the tombstone is not - // file-bounds-constrained in its lower bound. - // - // See the detailed comments in isNextEntryDeleted() on why similar containment and - // seeking logic is correct. The subtle difference here is that key is a user key, - // so we can have a sstable with bounds [c#8, i#InternalRangeDelSentinel], and the - // tombstone is [b, k)#8 and the seek key is i: levelIter.SeekGE(i) will move past - // this sstable since it realizes the largest key is a InternalRangeDelSentinel. var err error l.tombstone, err = rangeDelIter.SeekGE(key) if err != nil { return err } - if l.tombstone != nil && l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, key) && - (l.smallestUserKey == nil || m.heap.cmp(l.smallestUserKey, key) <= 0) { - // NB: Based on the comment above l.largestUserKey >= key, and based on the - // containment condition tombstone.End > key, so the assignment to key results - // in a monotonically non-decreasing key across iterations of this loop. + if l.tombstone != nil && l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, key) { + // Based on the containment condition tombstone.End > key, so + // the assignment to key results in a monotonically + // non-decreasing key across iterations of this loop. 
// - // The adjustment of key here can only move it to a larger key. Since - // the caller of seekGE guaranteed that the original key was greater - // than or equal to m.lower, the new key will continue to be greater - // than or equal to m.lower. - if l.largestUserKey != nil && - m.heap.cmp(l.largestUserKey, l.tombstone.End) < 0 { - // Truncate the tombstone for seeking purposes. Note that this can over-truncate - // but that is harmless for this seek optimization. - key = l.largestUserKey - } else { - key = l.tombstone.End - } + // The adjustment of key here can only move it to a larger key. + // Since the caller of seekGE guaranteed that the original key + // was greater than or equal to m.lower, the new key will + // continue to be greater than or equal to m.lower. + key = l.tombstone.End } } } @@ -1259,48 +1162,23 @@ func (m *mergingIter) seekLT(key []byte, level int, flags base.SeekLTFlags) erro (m.combinedIterState == nil || m.combinedIterState.initialized) { // The level has a range-del iterator. Find the tombstone containing // the search key. - // - // For untruncated tombstones that are possibly file-bounds-constrained we are using a - // levelIter which will set smallestUserKey and largestUserKey. Since the levelIter - // is at this file we know that smallestUserKey <= key, so we know that the - // tombstone we find cannot be file-bounds-constrained in its lower bound to something > key. - // We do need to compare with largestUserKey to ensure that the tombstone is not - // file-bounds-constrained in its upper bound. - // - // See the detailed comments in isPrevEntryDeleted() on why similar containment and - // seeking logic is correct. - - // Default to within bounds. - withinLargestSSTableBound := true - if l.largestUserKey != nil { - cmpResult := m.heap.cmp(l.largestUserKey, key) - withinLargestSSTableBound = cmpResult > 0 || (cmpResult == 0 && !l.isLargestUserKeyExclusive) - } - tomb, err := keyspan.SeekLE(m.heap.cmp, rangeDelIter, key) if err != nil { return err } l.tombstone = tomb if l.tombstone != nil && l.tombstone.VisibleAt(m.snapshot) && - l.tombstone.Contains(m.heap.cmp, key) && withinLargestSSTableBound { - // NB: Based on the comment above l.smallestUserKey <= key, and based - // on the containment condition tombstone.Start.UserKey <= key, so the - // assignment to key results in a monotonically non-increasing key - // across iterations of this loop. + l.tombstone.Contains(m.heap.cmp, key) { + // NB: Based on the containment condition + // tombstone.Start.UserKey <= key, so the assignment to key + // results in a monotonically non-increasing key across + // iterations of this loop. // - // The adjustment of key here can only move it to a smaller key. Since - // the caller of seekLT guaranteed that the original key was less than - // or equal to m.upper, the new key will continue to be less than or - // equal to m.upper. - if l.smallestUserKey != nil && - m.heap.cmp(l.smallestUserKey, l.tombstone.Start) >= 0 { - // Truncate the tombstone for seeking purposes. Note that this can over-truncate - // but that is harmless for this seek optimization. - key = l.smallestUserKey - } else { - key = l.tombstone.Start - } + // The adjustment of key here can only move it to a smaller key. + // Since the caller of seekLT guaranteed that the original key + // was less than or equal to m.upper, the new key will continue + // to be less than or equal to m.upper. 
+ key = l.tombstone.Start } } } diff --git a/vendor/github.com/cockroachdb/pebble/open.go b/vendor/github.com/cockroachdb/pebble/open.go index 19f025988b..1647b2e3ab 100644 --- a/vendor/github.com/cockroachdb/pebble/open.go +++ b/vendor/github.com/cockroachdb/pebble/open.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/pebble/record" "github.com/cockroachdb/pebble/sstable" "github.com/cockroachdb/pebble/vfs" + "github.com/cockroachdb/pebble/wal" "github.com/prometheus/client_golang/prometheus" ) @@ -213,10 +214,11 @@ func Open(dirname string, opts *Options) (db *DB, err error) { fileLock: fileLock, dataDir: dataDir, walDir: walDir, - logRecycler: logRecycler{limit: opts.MemTableStopWritesThreshold + 1}, + logRecycler: wal.LogRecycler{}, closed: new(atomic.Value), closedCh: make(chan struct{}), } + d.logRecycler.Init(opts.MemTableStopWritesThreshold + 1) d.mu.versions = &versionSet{} d.diskAvailBytes.Store(math.MaxUint64) @@ -368,7 +370,7 @@ func Open(dirname string, opts *Options) (db *DB, err error) { opts.TableCache, d.cacheID, d.objProvider, d.opts, tableCacheSize, &sstable.CategoryStatsCollector{}) d.newIters = d.tableCache.newIters - d.tableNewRangeKeyIter = d.tableCache.newRangeKeyIter + d.tableNewRangeKeyIter = tableNewRangeKeyIter(context.TODO(), d.newIters) // Replay any newer log files than the ones named in the manifest. type fileNumAndName struct { @@ -395,8 +397,8 @@ func Open(dirname string, opts *Options) (db *DB, err error) { if fn >= d.mu.versions.minUnflushedLogNum { logFiles = append(logFiles, fileNumAndName{fn, filename}) } - if d.logRecycler.minRecycleLogNum <= fn { - d.logRecycler.minRecycleLogNum = fn + 1 + if d.logRecycler.MinRecycleLogNum() <= fn { + d.logRecycler.SetMinRecycleLogNum(fn + 1) } case fileTypeOptions: if previousOptionsFileNum < fn { @@ -482,7 +484,7 @@ func Open(dirname string, opts *Options) (db *DB, err error) { } newLogName := base.MakeFilepath(opts.FS, d.walDirname, fileTypeLog, newLogNum) - d.mu.log.queue = append(d.mu.log.queue, fileInfo{fileNum: newLogNum, fileSize: 0}) + d.mu.log.queue = append(d.mu.log.queue, fileInfo{FileNum: newLogNum, FileSize: 0}) logFile, err := opts.FS.Create(newLogName) if err != nil { return nil, err diff --git a/vendor/github.com/cockroachdb/pebble/options.go b/vendor/github.com/cockroachdb/pebble/options.go index c51746d448..799ace9528 100644 --- a/vendor/github.com/cockroachdb/pebble/options.go +++ b/vendor/github.com/cockroachdb/pebble/options.go @@ -1056,11 +1056,6 @@ type Options struct { // against the FS are made after the DB is closed, the FS may leak a // goroutine indefinitely. fsCloser io.Closer - - // efosAlwaysCreatesIterators is set by some tests to force - // EventuallyFileOnlySnapshots to always create iterators, even after a - // conflicting excise. - efosAlwaysCreatesIterators bool } } @@ -1237,13 +1232,6 @@ func (o *Options) AddEventListener(l EventListener) { o.EventListener = &l } -// TestingAlwaysCreateEFOSIterators is used to toggle a private option for -// having EventuallyFileOnlySnapshots always create iterators. Meant to only -// be used in tests. 
-func (o *Options) TestingAlwaysCreateEFOSIterators(value bool) { - o.private.efosAlwaysCreatesIterators = value -} - func (o *Options) equal() Equal { if o.Comparer.Equal == nil { return bytes.Equal diff --git a/vendor/github.com/cockroachdb/pebble/record/log_writer.go b/vendor/github.com/cockroachdb/pebble/record/log_writer.go index 3b1fa775fe..95a5dd2846 100644 --- a/vendor/github.com/cockroachdb/pebble/record/log_writer.go +++ b/vendor/github.com/cockroachdb/pebble/record/log_writer.go @@ -150,12 +150,14 @@ func (q *syncQueue) pop(head, tail uint32, err error, queueSemChan chan struct{} *slot.err = err slot.wg = nil slot.err = nil - // We need to bump the tail count before signalling the wait group as - // signalling the wait group can trigger release a blocked goroutine which - // will try to enqueue before we've "freed" space in the queue. + // We need to bump the tail count before releasing the queueSemChan + // semaphore as releasing the semaphore can cause a blocked goroutine to + // acquire the semaphore and enqueue before we've "freed" space in the + // queue. q.headTail.Add(1) wg.Done() - // Is always non-nil in production. + // Is always non-nil in production, unless using wal package for WAL + // failover. if queueSemChan != nil { <-queueSemChan } @@ -164,17 +166,180 @@ func (q *syncQueue) pop(head, tail uint32, err error, queueSemChan chan struct{} return nil } +// pendingSyncs abstracts out the handling of pending sync requests. In +// standalone mode the implementation is a thin wrapper around syncQueue. In +// the mode where the LogWriter can be subject to failover, there is no queue +// kept in the LogWriter and the signaling to those waiting for sync is +// handled in the wal package. +// +// To avoid heap allocations due to the use of this interface, the parameters +// and return values follow some strict rules: +// - The PendingSync parameter can be reused by the caller after push returns. +// The implementation should be a pointer backed by a struct that is already +// heap allocated, which the caller can reuse for the next push call. +// - The pendingSyncSnapshot return value must be backed by the pendingSyncs +// implementation, so calling snapshotForPop again will cause the previous +// snapshot to be overwritten. +type pendingSyncs interface { + push(PendingSync) + setBlocked() + clearBlocked() + empty() bool + snapshotForPop() pendingSyncsSnapshot + pop(snap pendingSyncsSnapshot, err error) error +} + +type pendingSyncsSnapshot interface { + empty() bool +} + +// PendingSync abstracts the sync specification for a record queued on the +// LogWriter. The only implementations are provided in this package since +// syncRequested is not exported. +type PendingSync interface { + syncRequested() bool +} + +// The implementation of pendingSyncs in standalone mode. +type pendingSyncsWithSyncQueue struct { + syncQueue + syncQueueLen *base.GaugeSampleMetric + snapshotBacking syncQueueSnapshot + // See the comment for LogWriterConfig.QueueSemChan. 
+ queueSemChan chan struct{} +} + +var _ pendingSyncs = &pendingSyncsWithSyncQueue{} + +func (q *pendingSyncsWithSyncQueue) push(ps PendingSync) { + ps2 := ps.(*pendingSyncForSyncQueue) + q.syncQueue.push(ps2.wg, ps2.err) +} + +func (q *pendingSyncsWithSyncQueue) snapshotForPop() pendingSyncsSnapshot { + head, tail, realLength := q.syncQueue.load() + q.snapshotBacking = syncQueueSnapshot{ + head: head, + tail: tail, + } + q.syncQueueLen.AddSample(int64(realLength)) + return &q.snapshotBacking +} + +func (q *pendingSyncsWithSyncQueue) pop(snap pendingSyncsSnapshot, err error) error { + s := snap.(*syncQueueSnapshot) + return q.syncQueue.pop(s.head, s.tail, err, q.queueSemChan) +} + +// The implementation of pendingSyncsSnapshot in standalone mode. +type syncQueueSnapshot struct { + head, tail uint32 +} + +func (s *syncQueueSnapshot) empty() bool { + return s.head == s.tail +} + +// The implementation of pendingSync in standalone mode. +type pendingSyncForSyncQueue struct { + wg *sync.WaitGroup + err *error +} + +func (ps *pendingSyncForSyncQueue) syncRequested() bool { + return ps.wg != nil +} + +// The implementation of pendingSyncs in failover mode. +type pendingSyncsWithHighestSyncIndex struct { + // The highest "index" queued that is requesting a sync. Initialized + // to NoSyncIndex, and reset to NoSyncIndex after the sync. + index atomic.Int64 + snapshotBacking PendingSyncIndex + // blocked is an atomic boolean which indicates whether syncing is currently + // blocked or can proceed. It is used by the implementation of + // min-sync-interval to block syncing until the min interval has passed. + blocked atomic.Bool + externalSyncQueueCallback ExternalSyncQueueCallback +} + +// NoSyncIndex is the value of PendingSyncIndex when a sync is not requested. +const NoSyncIndex = -1 + +func (si *pendingSyncsWithHighestSyncIndex) init( + externalSyncQueueCallback ExternalSyncQueueCallback, +) { + si.index.Store(NoSyncIndex) + si.externalSyncQueueCallback = externalSyncQueueCallback +} + +func (si *pendingSyncsWithHighestSyncIndex) push(ps PendingSync) { + ps2 := ps.(*PendingSyncIndex) + si.index.Store(ps2.Index) +} + +func (si *pendingSyncsWithHighestSyncIndex) setBlocked() { + si.blocked.Store(true) +} + +func (si *pendingSyncsWithHighestSyncIndex) clearBlocked() { + si.blocked.Store(false) +} + +func (si *pendingSyncsWithHighestSyncIndex) empty() bool { + return si.load() == NoSyncIndex +} + +func (si *pendingSyncsWithHighestSyncIndex) snapshotForPop() pendingSyncsSnapshot { + si.snapshotBacking = PendingSyncIndex{Index: si.load()} + return &si.snapshotBacking +} + +func (si *pendingSyncsWithHighestSyncIndex) load() int64 { + index := si.index.Load() + if index != NoSyncIndex && si.blocked.Load() { + index = NoSyncIndex + } + return index +} + +func (si *pendingSyncsWithHighestSyncIndex) pop(snap pendingSyncsSnapshot, err error) error { + index := snap.(*PendingSyncIndex) + if index.Index == NoSyncIndex { + return nil + } + // Set to NoSyncIndex if a higher index has not queued. + si.index.CompareAndSwap(index.Index, NoSyncIndex) + si.externalSyncQueueCallback(*index, err) + return nil +} + +// PendingSyncIndex implements both pendingSyncsSnapshot and PendingSync. +type PendingSyncIndex struct { + // Index is some state meaningful to the user of LogWriter. The LogWriter + // itself only examines whether Index is equal to NoSyncIndex. 
+ Index int64 +} + +func (s *PendingSyncIndex) empty() bool { + return s.Index == NoSyncIndex +} + +func (s *PendingSyncIndex) syncRequested() bool { + return s.Index != NoSyncIndex +} + // flusherCond is a specialized condition variable that allows its condition to // change and readiness be signalled without holding its associated mutex. In // particular, when a waiter is added to syncQueue atomically, this condition // variable can be signalled without holding flusher.Mutex. type flusherCond struct { mu *sync.Mutex - q *syncQueue + q pendingSyncs cond sync.Cond } -func (c *flusherCond) init(mu *sync.Mutex, q *syncQueue) { +func (c *flusherCond) init(mu *sync.Mutex, q pendingSyncs) { c.mu = mu c.q = q // Yes, this is a bit circular, but that is intentional. flusherCond.cond.L @@ -259,8 +424,12 @@ type LogWriter struct { logNum uint32 // blockNum is the zero based block number for the current block. blockNum int64 - // err is any accumulated error. TODO(peter): This needs to be protected in - // some fashion. Perhaps using atomic.Value. + // err is any accumulated error. It originates in flusher.err, and is + // updated to reflect flusher.err when a block is full and getting enqueued. + // Therefore, there is a lag between when flusher.err has a non-nil error, + // and when that non-nil error is reflected in LogWriter.err. On close, it + // is set to errClosedWriter to inform accidental future calls to + // SyncRecord*. err error // block is the current block being written. Protected by flusher.Mutex. block *block @@ -286,8 +455,10 @@ type LogWriter struct { minSyncInterval durationFunc fsyncLatency prometheus.Histogram pending []*block - syncQ syncQueue - metrics *LogWriterMetrics + // Pushing and popping from pendingSyncs does not require flusher mutex to + // be held. + pendingSyncs pendingSyncs + metrics *LogWriterMetrics } // afterFunc is a hook to allow tests to mock out the timer functionality @@ -295,8 +466,11 @@ type LogWriter struct { // time.AfterFunc. afterFunc func(d time.Duration, f func()) syncTimer - // See the comment for LogWriterConfig.QueueSemChan. - queueSemChan chan struct{} + // Backing for both pendingSyncs implementations. + pendingSyncsBackingQ pendingSyncsWithSyncQueue + pendingSyncsBackingIndex pendingSyncsWithHighestSyncIndex + + pendingSyncForSyncQueueBacking pendingSyncForSyncQueue } // LogWriterConfig is a struct used for configuring new LogWriters @@ -308,8 +482,25 @@ type LogWriterConfig struct { // the syncQueue from overflowing (which will cause a panic). All production // code ensures this is non-nil. QueueSemChan chan struct{} + + // ExternalSyncQueueCallback is set to non-nil when the LogWriter is used + // as part of a WAL implementation that can failover between LogWriters. + // + // In this case, QueueSemChan is always nil, and SyncRecordGeneralized must + // be used with a PendingSync parameter that is implemented by + // PendingSyncIndex. When an index is synced (which implies all earlier + // indices are also synced), this callback is invoked. The caller must not + // hold any mutex when invoking this callback, since the lock ordering + // requirement in this case is that any higher layer locks (in the wal + // package) precede the lower layer locks (in the record package). These + // callbacks are serialized since they are invoked from the flushLoop. + ExternalSyncQueueCallback ExternalSyncQueueCallback } +// ExternalSyncQueueCallback is to be run when a PendingSync has been +// processed, either successfully or with an error. 
+type ExternalSyncQueueCallback func(doneSync PendingSyncIndex, err error) + // initialAllocatedBlocksCap is the initial capacity of the various slices // intended to hold LogWriter blocks. The LogWriter may allocate more blocks // than this threshold allows. @@ -323,6 +514,9 @@ var blockPool = sync.Pool{ } // NewLogWriter returns a new LogWriter. +// +// The io.Writer may also be used as an io.Closer and syncer. No other methods +// will be called on the writer. func NewLogWriter( w io.Writer, logNum base.DiskFileNum, logWriterConfig LogWriterConfig, ) *LogWriter { @@ -340,14 +534,25 @@ func NewLogWriter( afterFunc: func(d time.Duration, f func()) syncTimer { return time.AfterFunc(d, f) }, - queueSemChan: logWriterConfig.QueueSemChan, } + m := &LogWriterMetrics{} + if logWriterConfig.ExternalSyncQueueCallback != nil { + r.pendingSyncsBackingIndex.init(logWriterConfig.ExternalSyncQueueCallback) + r.flusher.pendingSyncs = &r.pendingSyncsBackingIndex + } else { + r.pendingSyncsBackingQ = pendingSyncsWithSyncQueue{ + syncQueueLen: &m.SyncQueueLen, + queueSemChan: logWriterConfig.QueueSemChan, + } + r.flusher.pendingSyncs = &r.pendingSyncsBackingQ + } + r.free.blocks = make([]*block, 0, initialAllocatedBlocksCap) r.block = blockPool.Get().(*block) - r.flusher.ready.init(&r.flusher.Mutex, &r.flusher.syncQ) + r.flusher.ready.init(&r.flusher.Mutex, r.flusher.pendingSyncs) r.flusher.closed = make(chan struct{}) r.flusher.pending = make([]*block, 0, cap(r.free.blocks)) - r.flusher.metrics = &LogWriterMetrics{} + r.flusher.metrics = m f := &r.flusher f.minSyncInterval = logWriterConfig.WALMinSyncInterval @@ -423,14 +628,14 @@ func (w *LogWriter) flushLoop(context.Context) { // the current block can be added to the pending blocks list after we release // the flusher lock, but it won't be part of pending. written := w.block.written.Load() - if len(f.pending) > 0 || written > w.block.flushed || !f.syncQ.empty() { + if len(f.pending) > 0 || written > w.block.flushed || !f.pendingSyncs.empty() { break } if f.close { // If the writer is closed, pretend the sync timer fired immediately so // that we can process any queued sync requests. - f.syncQ.clearBlocked() - if !f.syncQ.empty() { + f.pendingSyncs.clearBlocked() + if !f.pendingSyncs.empty() { break } return @@ -439,6 +644,18 @@ func (w *LogWriter) flushLoop(context.Context) { continue } // Found work to do, so no longer idle. + // + // NB: it is safe to read pending before loading from the syncQ since + // mutations to pending require the w.flusher mutex, which is held here. + // There is no risk that someone will concurrently add to pending, so the + // following sequence, which would pick up a syncQ entry without the + // corresponding data, is impossible: + // + // Thread enqueueing This thread + // 1. read pending + // 2. add block to pending + // 3. add to syncQ + // 4. read syncQ workStartTime := time.Now() idleDuration := workStartTime.Sub(idleStartTime) pending = append(pending[:0], f.pending...) @@ -448,8 +665,7 @@ func (w *LogWriter) flushLoop(context.Context) { // Grab the list of sync waiters. Note that syncQueue.load() will return // 0,0 while we're waiting for the min-sync-interval to expire. This // allows flushing to proceed even if we're not ready to sync. - head, tail, realSyncQLen := f.syncQ.load() - f.metrics.SyncQueueLen.AddSample(int64(realSyncQLen)) + snap := f.pendingSyncs.snapshotForPop() // Grab the portion of the current block that requires flushing. 
Note that
 // the current block can be added to the pending blocks list after we
@@ -461,25 +677,29 @@ func (w *LogWriter) flushLoop(context.Context) {
 		data := w.block.buf[w.block.flushed:written]
 		w.block.flushed = written
+		fErr := f.err
+		f.Unlock()
 		// If flusher has an error, we propagate it to waiters. Note in spite of
 		// error we consume the pending list above to free blocks for writers.
-		if f.err != nil {
-			f.syncQ.pop(head, tail, f.err, w.queueSemChan)
+		if fErr != nil {
+			// NB: pop may invoke ExternalSyncQueueCallback, which is why we have
+			// called f.Unlock() above. We will acquire the lock again below.
+			f.pendingSyncs.pop(snap, fErr)
 			// Update the idleStartTime if work could not be done, so that we don't
 			// include the duration we tried to do work as idle. We don't bother
 			// with the rest of the accounting, which means we will undercount.
 			idleStartTime = time.Now()
+			f.Lock()
 			continue
 		}
-		f.Unlock()
-		synced, syncLatency, bytesWritten, err := w.flushPending(data, pending, head, tail)
+		synced, syncLatency, bytesWritten, err := w.flushPending(data, pending, snap)
 		f.Lock()
 		if synced && f.fsyncLatency != nil {
 			f.fsyncLatency.Observe(float64(syncLatency))
 		}
 		f.err = err
 		if f.err != nil {
-			f.syncQ.clearBlocked()
+			f.pendingSyncs.clearBlocked()
 			// Update the idleStartTime if work could not be done, so that we don't
 			// include the duration we tried to do work as idle. We don't bother
 			// with the rest of the accounting, which means we will undercount.
@@ -491,10 +711,10 @@
 			// A sync was performed. Make sure we've waited for the min sync
 			// interval before syncing again.
 			if min := f.minSyncInterval(); min > 0 {
-				f.syncQ.setBlocked()
+				f.pendingSyncs.setBlocked()
 				if syncTimer == nil {
 					syncTimer = w.afterFunc(min, func() {
-						f.syncQ.clearBlocked()
+						f.pendingSyncs.clearBlocked()
 						f.ready.Signal()
 					})
 				} else {
@@ -512,7 +732,7 @@
 }

 func (w *LogWriter) flushPending(
-	data []byte, pending []*block, head, tail uint32,
+	data []byte, pending []*block, snap pendingSyncsSnapshot,
 ) (synced bool, syncLatency time.Duration, bytesWritten int64, err error) {
 	defer func() {
 		// Translate panics into errors. The errors will cause flushLoop to shut
@@ -535,14 +755,16 @@
 		_, err = w.w.Write(data)
 	}

-	synced = head != tail
+	synced = !snap.empty()
 	if synced {
 		if err == nil && w.s != nil {
 			syncLatency, err = w.syncWithLatency()
+		} else {
+			synced = false
 		}
 		f := &w.flusher
-		if popErr := f.syncQ.pop(head, tail, err, w.queueSemChan); popErr != nil {
-			return synced, syncLatency, bytesWritten, popErr
+		if popErr := f.pendingSyncs.pop(snap, err); popErr != nil {
+			return synced, syncLatency, bytesWritten, firstError(err, popErr)
 		}
 	}
@@ -595,6 +817,16 @@ func (w *LogWriter) queueBlock() {
 // Close flushes and syncs any unwritten data and closes the writer.
 // Where required, external synchronisation is provided by commitPipeline.mu.
 func (w *LogWriter) Close() error {
+	return w.closeInternal(PendingSyncIndex{Index: NoSyncIndex})
+}
+
+// CloseWithLastQueuedRecord is like Close, but optionally accepts a
+// lastQueuedRecord that the caller will be notified about when synced.
+func (w *LogWriter) CloseWithLastQueuedRecord(lastQueuedRecord PendingSyncIndex) error {
+	return w.closeInternal(lastQueuedRecord)
+}
+
+func (w *LogWriter) closeInternal(lastQueuedRecord PendingSyncIndex) error {
 	f := &w.flusher

 	// Emit an EOF trailer signifying the end of this log.
This helps readers @@ -621,18 +853,21 @@ func (w *LogWriter) Close() error { syncLatency, err = w.syncWithLatency() } f.Lock() - if f.fsyncLatency != nil { + if err == nil && f.fsyncLatency != nil { f.fsyncLatency.Observe(float64(syncLatency)) } free := w.free.blocks f.Unlock() + // NB: the caller of closeInternal may not care about a non-nil cerr below + // if all queued writes have been successfully written and synced. + if lastQueuedRecord.Index != NoSyncIndex { + w.pendingSyncsBackingIndex.externalSyncQueueCallback(lastQueuedRecord, err) + } if w.c != nil { cerr := w.c.Close() w.c = nil - if cerr != nil { - return cerr - } + err = firstError(err, cerr) } for _, b := range free { @@ -645,6 +880,15 @@ func (w *LogWriter) Close() error { return err } +// firstError returns the first non-nil error of err0 and err1, or nil if both +// are nil. +func firstError(err0, err1 error) error { + if err0 != nil { + return err0 + } + return err1 +} + // WriteRecord writes a complete record. Returns the offset just past the end // of the record. // External synchronisation provided by commitPipeline.mu. @@ -661,6 +905,16 @@ func (w *LogWriter) WriteRecord(p []byte) (int64, error) { func (w *LogWriter) SyncRecord( p []byte, wg *sync.WaitGroup, err *error, ) (logSize int64, err2 error) { + w.pendingSyncForSyncQueueBacking = pendingSyncForSyncQueue{ + wg: wg, + err: err, + } + return w.SyncRecordGeneralized(p, &w.pendingSyncForSyncQueueBacking) +} + +// SyncRecordGeneralized is a version of SyncRecord that accepts a +// PendingSync. +func (w *LogWriter) SyncRecordGeneralized(p []byte, ps PendingSync) (logSize int64, err2 error) { if w.err != nil { return -1, w.err } @@ -673,14 +927,14 @@ func (w *LogWriter) SyncRecord( p = w.emitFragment(i, p) } - if wg != nil { + if ps.syncRequested() { // If we've been asked to persist the record, add the WaitGroup to the sync // queue and signal the flushLoop. Note that flushLoop will write partial // blocks to the file if syncing has been requested. The contract is that // any record written to the LogWriter to this point will be flushed to the // OS and synced to disk. f := &w.flusher - f.syncQ.push(wg, err) + f.pendingSyncs.push(ps) f.ready.Signal() } diff --git a/vendor/github.com/cockroachdb/pebble/scan_internal.go b/vendor/github.com/cockroachdb/pebble/scan_internal.go index 3fdb148e44..3073559ba8 100644 --- a/vendor/github.com/cockroachdb/pebble/scan_internal.go +++ b/vendor/github.com/cockroachdb/pebble/scan_internal.go @@ -464,7 +464,7 @@ func (d *DB) truncateSharedFile( // We will need to truncate file bounds in at least one direction. Open all // relevant iterators. 
- iter, rangeDelIter, err := d.newIters(ctx, file, &IterOptions{ + iter, rangeDelIter, err := d.newIters.TODO(ctx, file, &IterOptions{ LowerBound: lower, UpperBound: upper, level: manifest.Level(level), diff --git a/vendor/github.com/cockroachdb/pebble/snapshot.go b/vendor/github.com/cockroachdb/pebble/snapshot.go index 1714765a32..192b5ae757 100644 --- a/vendor/github.com/cockroachdb/pebble/snapshot.go +++ b/vendor/github.com/cockroachdb/pebble/snapshot.go @@ -9,20 +9,13 @@ import ( "io" "math" "sync" - "sync/atomic" "time" - "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/rangekey" "github.com/cockroachdb/pebble/sstable" ) -// ErrSnapshotExcised is returned from WaitForFileOnlySnapshot if an excise -// overlapping with one of the EventuallyFileOnlySnapshot's KeyRanges gets -// applied before the transition of that EFOS to a file-only snapshot. -var ErrSnapshotExcised = errors.New("pebble: snapshot excised before conversion to file-only snapshot") - // Snapshot provides a read-only point-in-time view of the DB state. type Snapshot struct { // The db the snapshot was created from. @@ -228,13 +221,12 @@ func (l *snapshotList) remove(s *Snapshot) { // the snapshot is closed may prefer EventuallyFileOnlySnapshots for their // reduced write amplification. Callers that desire the benefits of the file-only // state that requires no pinning of memtables should call -// `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns -// ErrSnapshotExcised) before relying on the EFOS to keep producing iterators +// `WaitForFileOnlySnapshot()` before relying on the EFOS to keep producing iterators // with zero write-amp and zero pinning of memtables in memory. // // EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in -// subtle ways. No new iterators can be created once -// EventuallyFileOnlySnapshot.excised is set to true. +// subtle ways. The IngestAndExcise can force the transition of an EFOS to a +// file-only snapshot if an excise overlaps with the EFOS bounds. type EventuallyFileOnlySnapshot struct { mu struct { // NB: If both this mutex and db.mu are being grabbed, db.mu should be @@ -250,28 +242,14 @@ type EventuallyFileOnlySnapshot struct { snap *Snapshot // The wrapped version reference, if a file-only snapshot. vers *version - - // The readState corresponding to when this EFOS was created. Only set - // if alwaysCreateIters is true. - rs *readState } // Key ranges to watch for an excise on. protectedRanges []KeyRange - // excised, if true, signals that the above ranges were excised during the - // lifetime of this snapshot. - excised atomic.Bool // The db the snapshot was created from. db *DB seqNum uint64 - - // If true, this EventuallyFileOnlySnapshot will always generate iterators that - // retain snapshot semantics, by holding onto the readState if a conflicting - // excise were to happen. Only used in some tests to enforce deterministic - // behaviour around excises. - alwaysCreateIters bool - closed chan struct{} } @@ -289,14 +267,10 @@ func (d *DB) makeEventuallyFileOnlySnapshot(keyRanges []KeyRange) *EventuallyFil }, sliceAsBounded(keyRanges)...) 
} es := &EventuallyFileOnlySnapshot{ - db: d, - seqNum: seqNum, - protectedRanges: keyRanges, - closed: make(chan struct{}), - alwaysCreateIters: d.opts.private.efosAlwaysCreatesIterators, - } - if es.alwaysCreateIters { - es.mu.rs = d.loadReadState() + db: d, + seqNum: seqNum, + protectedRanges: keyRanges, + closed: make(chan struct{}), } if isFileOnly { es.mu.vers = d.mu.versions.currentVersion() @@ -390,9 +364,6 @@ func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked() } - if es.excised.Load() { - return ErrSnapshotExcised - } return nil } @@ -440,9 +411,6 @@ func (es *EventuallyFileOnlySnapshot) Close() error { if es.mu.vers != nil { es.mu.vers.UnrefLocked() } - if es.mu.rs != nil { - es.mu.rs.unrefLocked() - } return nil } @@ -478,31 +446,6 @@ func (es *EventuallyFileOnlySnapshot) NewIter(o *IterOptions) (*Iterator, error) return es.NewIterWithContext(context.Background(), o) } -func (es *EventuallyFileOnlySnapshot) newAlwaysCreateIterWithContext( - ctx context.Context, o *IterOptions, -) (*Iterator, error) { - // Grab the db mutex. This avoids races down below, where we could get - // excised between the es.excised.Load() call, and the newIter call. - es.db.mu.Lock() - defer es.db.mu.Unlock() - es.mu.Lock() - defer es.mu.Unlock() - if es.mu.vers != nil { - sOpts := snapshotIterOpts{seqNum: es.seqNum, vers: es.mu.vers} - return es.db.newIter(ctx, nil /* batch */, newIterOpts{snapshot: sOpts}, o), nil - } - - sOpts := snapshotIterOpts{seqNum: es.seqNum} - if es.excised.Load() { - if es.mu.rs == nil { - return nil, errors.AssertionFailedf("unexpected nil readState in EFOS' alwaysCreateIters mode") - } - sOpts.readState = es.mu.rs - } - iter := es.db.newIter(ctx, nil /* batch */, newIterOpts{snapshot: sOpts}, o) - return iter, nil -} - // NewIterWithContext is like NewIter, and additionally accepts a context for // tracing. func (es *EventuallyFileOnlySnapshot) NewIterWithContext( @@ -514,9 +457,6 @@ func (es *EventuallyFileOnlySnapshot) NewIterWithContext( default: } - if es.alwaysCreateIters { - return es.newAlwaysCreateIterWithContext(ctx, o) - } es.mu.Lock() defer es.mu.Unlock() if es.mu.vers != nil { @@ -525,17 +465,7 @@ func (es *EventuallyFileOnlySnapshot) NewIterWithContext( } sOpts := snapshotIterOpts{seqNum: es.seqNum} - if es.excised.Load() { - return nil, ErrSnapshotExcised - } iter := es.db.newIter(ctx, nil /* batch */, newIterOpts{snapshot: sOpts}, o) - - // If excised is true, then keys relevant to the snapshot might not be - // present in the readState being used by the iterator. - if es.excised.Load() { - iter.Close() - return nil, ErrSnapshotExcised - } return iter, nil } @@ -557,9 +487,6 @@ func (es *EventuallyFileOnlySnapshot) ScanInternal( if es.db == nil { panic(ErrClosed) } - if es.excised.Load() && !es.alwaysCreateIters { - return ErrSnapshotExcised - } var sOpts snapshotIterOpts opts := &scanInternalOptions{ CategoryAndQoS: categoryAndQoS, @@ -574,11 +501,6 @@ func (es *EventuallyFileOnlySnapshot) ScanInternal( visitSharedFile: visitSharedFile, skipSharedLevels: visitSharedFile != nil, } - if es.alwaysCreateIters { - // Grab the db mutex. This avoids races down below as it prevents excises - // from taking effect until the iterator is instantiated. 
- es.db.mu.Lock() - } es.mu.Lock() if es.mu.vers != nil { sOpts = snapshotIterOpts{ @@ -586,15 +508,8 @@ func (es *EventuallyFileOnlySnapshot) ScanInternal( vers: es.mu.vers, } } else { - if es.excised.Load() && es.alwaysCreateIters { - sOpts = snapshotIterOpts{ - readState: es.mu.rs, - seqNum: es.seqNum, - } - } else { - sOpts = snapshotIterOpts{ - seqNum: es.seqNum, - } + sOpts = snapshotIterOpts{ + seqNum: es.seqNum, } } es.mu.Unlock() @@ -603,16 +518,6 @@ func (es *EventuallyFileOnlySnapshot) ScanInternal( return err } defer iter.close() - if es.alwaysCreateIters { - // See the similar conditional above where we grab this mutex. - es.db.mu.Unlock() - } - - // If excised is true, then keys relevant to the snapshot might not be - // present in the readState being used by the iterator. Error out. - if es.excised.Load() && !es.alwaysCreateIters { - return ErrSnapshotExcised - } return scanInternalImpl(ctx, lower, upper, iter, opts) } diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go index ed676957d6..705877448e 100644 --- a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go +++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_single_lvl.go @@ -165,6 +165,10 @@ type singleLevelIterator struct { lastBloomFilterMatched bool hideObsoletePoints bool + + // inPool is set to true before putting the iterator in the reusable pool; + // used to detect double-close. + inPool bool } // singleLevelIterator implements the base.InternalIterator interface. @@ -203,6 +207,7 @@ func (i *singleLevelIterator) init( i.endKeyInclusive, lower, upper = v.constrainBounds(lower, upper, false /* endInclusive */) } + i.inPool = false i.ctx = ctx i.lower = lower i.upper = upper @@ -274,8 +279,9 @@ func (i *singleLevelIterator) setupForCompaction() { func (i *singleLevelIterator) resetForReuse() singleLevelIterator { return singleLevelIterator{ - index: i.index.resetForReuse(), - data: i.data.resetForReuse(), + index: i.index.resetForReuse(), + data: i.data.resetForReuse(), + inPool: true, } } @@ -919,6 +925,11 @@ func (i *singleLevelIterator) virtualLastSeekLE(key []byte) (*InternalKey, base. return nil, base.LazyValue{} } if result == loadBlockIrrelevant { + // Enforce the lower bound here, as we could have gone past it. + if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 { + i.exhaustedBounds = -1 + return nil, base.LazyValue{} + } // Want to skip to the previous block. return i.skipBackward() } @@ -931,6 +942,11 @@ func (i *singleLevelIterator) virtualLastSeekLE(key []byte) (*InternalKey, base. } ikey, val = i.data.Prev() if ikey != nil { + // Enforce the lower bound here, as we could have gone past it. + if i.blockLower != nil && i.cmp(ikey.UserKey, i.blockLower) < 0 { + i.exhaustedBounds = -1 + return nil, base.LazyValue{} + } return ikey, val } return i.skipBackward() @@ -953,7 +969,7 @@ func (i *singleLevelIterator) SeekLT( // first internal key with user key < key. if cmp > 0 { // Return the last key in the virtual sstable. 
-			return i.virtualLast()
+			return i.maybeVerifyKey(i.virtualLast())
 		}
 	}

@@ -1133,7 +1149,7 @@ func (i *singleLevelIterator) firstInternal() (*InternalKey, base.LazyValue) {
 // SeekLT(upper))
 func (i *singleLevelIterator) Last() (*InternalKey, base.LazyValue) {
 	if i.vState != nil {
-		return i.virtualLast()
+		return i.maybeVerifyKey(i.virtualLast())
 	}

 	if i.upper != nil {
@@ -1440,6 +1456,9 @@ func firstError(err0, err1 error) error {
 // Close implements internalIterator.Close, as documented in the pebble
 // package.
 func (i *singleLevelIterator) Close() error {
+	if invariants.Enabled && i.inPool {
+		panic("Close called on iterator in pool")
+	}
 	i.iterStats.close()
 	var err error
 	if i.closeHook != nil {
diff --git a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
index 9a2a3f254c..a51cecbd56 100644
--- a/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
+++ b/vendor/github.com/cockroachdb/pebble/sstable/reader_iter_two_lvl.go
@@ -10,6 +10,7 @@ import (
 	"fmt"

 	"github.com/cockroachdb/pebble/internal/base"
+	"github.com/cockroachdb/pebble/internal/invariants"
 	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/objiotracing"
 )

@@ -165,6 +166,7 @@ func (i *twoLevelIterator) init(
 		i.endKeyInclusive, lower, upper = v.constrainBounds(lower, upper, false /* endInclusive */)
 	}

+	i.inPool = false
 	i.ctx = ctx
 	i.lower = lower
 	i.upper = upper
@@ -617,6 +619,9 @@ func (i *twoLevelIterator) virtualLastSeekLE(key []byte) (*InternalKey, base.Laz
 		return nil, base.LazyValue{}
 	}
 	if result == loadBlockIrrelevant {
+		if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 {
+			i.exhaustedBounds = -1
+		}
 		// Load the previous block.
 		return i.skipBackward()
 	}
@@ -979,6 +984,9 @@ func (i *twoLevelIterator) skipBackward() (*InternalKey, base.LazyValue) {
 // Close implements internalIterator.Close, as documented in the pebble
 // package.
 func (i *twoLevelIterator) Close() error {
+	if invariants.Enabled && i.inPool {
+		panic("Close called on iterator in pool")
+	}
 	i.iterStats.close()
 	var err error
 	if i.closeHook != nil {
diff --git a/vendor/github.com/cockroachdb/pebble/table_cache.go b/vendor/github.com/cockroachdb/pebble/table_cache.go
index b730ffec89..350e649f8c 100644
--- a/vendor/github.com/cockroachdb/pebble/table_cache.go
+++ b/vendor/github.com/cockroachdb/pebble/table_cache.go
@@ -48,6 +48,75 @@ func (s *filteredAllKeysIter) MaybeFilteredKeys() bool {
 	return true
 }

+// tableNewIters creates new iterators (point, range deletion and/or range key)
+// for the given file metadata. Which of the various iterator kinds the user is
+// requesting is specified with the iterKinds bitmap.
+//
+// On success, the requested subset of iters.{point,rangeDel,rangeKey} are
+// populated with iterators.
+//
+// If a point iterator is requested and the operation was successful,
+// iters.point is guaranteed to be non-nil and must be closed when the caller is
+// finished.
+//
+// If a range deletion or range key iterator is requested, the corresponding
+// iterator may be nil if the table does not contain any keys of the
+// corresponding kind. The returned iterSet type provides RangeDeletion() and
+// RangeKey() convenience methods that return non-nil empty iterators that may
+// be used if the caller requires a non-nil iterator.
+//
+// On error, all iterators are nil.
+//
+// The only (non-test) implementation of tableNewIters is
+// tableCacheContainer.newIters().
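A hedged sketch of what a call site looks like once it migrates off the transitional TODO adapter defined below. The wrapper function is hypothetical and would live inside package pebble; the calling convention itself mirrors this diff.

// migratedCallSite illustrates the new calling convention; it is
// behaviorally equivalent to newIters.TODO(ctx, file, opts, internalIterOpts{}).
func migratedCallSite(
	ctx context.Context, newIters tableNewIters,
	file *manifest.FileMetadata, opts *IterOptions,
) (internalIterator, keyspan.FragmentIterator, error) {
	// Request exactly the iterator kinds needed via the bitmap.
	iters, err := newIters(ctx, file, opts, internalIterOpts{}, iterPointKeys|iterRangeDeletions)
	if err != nil {
		return nil, nil, err
	}
	return iters.point, iters.rangeDeletion, nil
}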
+type tableNewIters func( + ctx context.Context, + file *manifest.FileMetadata, + opts *IterOptions, + internalOpts internalIterOpts, + kinds iterKinds, +) (iterSet, error) + +// TODO implements the old tableNewIters interface that always attempted to open +// a point iterator and a range deletion iterator. All call sites should be +// updated to use `f` directly. +func (f tableNewIters) TODO( + ctx context.Context, + file *manifest.FileMetadata, + opts *IterOptions, + internalOpts internalIterOpts, +) (internalIterator, keyspan.FragmentIterator, error) { + iters, err := f(ctx, file, opts, internalOpts, iterPointKeys|iterRangeDeletions) + if err != nil { + return nil, nil, err + } + return iters.point, iters.rangeDeletion, nil +} + +// tableNewRangeDelIter takes a tableNewIters and returns a TableNewSpanIter +// for the rangedel iterator returned by tableNewIters. +func tableNewRangeDelIter(ctx context.Context, newIters tableNewIters) keyspan.TableNewSpanIter { + return func(file *manifest.FileMetadata, iterOptions keyspan.SpanIterOptions) (keyspan.FragmentIterator, error) { + iters, err := newIters(ctx, file, nil, internalIterOpts{}, iterRangeDeletions) + if err != nil { + return nil, err + } + return iters.RangeDeletion(), nil + } +} + +// tableNewRangeKeyIter takes a tableNewIters and returns a TableNewSpanIter +// for the range key iterator returned by tableNewIters. +func tableNewRangeKeyIter(ctx context.Context, newIters tableNewIters) keyspan.TableNewSpanIter { + return func(file *manifest.FileMetadata, iterOptions keyspan.SpanIterOptions) (keyspan.FragmentIterator, error) { + iters, err := newIters(ctx, file, nil, internalIterOpts{}, iterRangeKeys) + if err != nil { + return nil, err + } + return iters.RangeKey(), nil + } +} + var tableCacheLabels = pprof.Labels("pebble", "table-cache") // tableCacheOpts contains the db specific fields @@ -140,14 +209,9 @@ func (c *tableCacheContainer) newIters( file *manifest.FileMetadata, opts *IterOptions, internalOpts internalIterOpts, -) (internalIterator, keyspan.FragmentIterator, error) { - return c.tableCache.getShard(file.FileBacking.DiskFileNum).newIters(ctx, file, opts, internalOpts, &c.dbOpts) -} - -func (c *tableCacheContainer) newRangeKeyIter( - file *manifest.FileMetadata, opts keyspan.SpanIterOptions, -) (keyspan.FragmentIterator, error) { - return c.tableCache.getShard(file.FileBacking.DiskFileNum).newRangeKeyIter(file, opts, &c.dbOpts) + kinds iterKinds, +) (iterSet, error) { + return c.tableCache.getShard(file.FileBacking.DiskFileNum).newIters(ctx, file, opts, internalOpts, &c.dbOpts, kinds) } // getTableProperties returns the properties associated with the backing physical @@ -201,17 +265,13 @@ func (c *tableCacheContainer) estimateSize( return size, nil } -// createCommonReader creates a Reader for this file. isShared, if true for -// virtual sstables, is passed into the vSSTable reader so its iterators can -// collapse obsolete points accordingly. -func createCommonReader( - v *tableCacheValue, file *fileMetadata, isShared bool, -) sstable.CommonReader { +// createCommonReader creates a Reader for this file. +func createCommonReader(v *tableCacheValue, file *fileMetadata) sstable.CommonReader { // TODO(bananabrick): We suffer an allocation if file is a virtual sstable. 
var cr sstable.CommonReader = v.reader if file.Virtual { virtualReader := sstable.MakeVirtualReader( - v.reader, file.VirtualMeta(), isShared, + v.reader, file.VirtualMeta(), v.isShared, ) cr = &virtualReader } @@ -227,12 +287,7 @@ func (c *tableCacheContainer) withCommonReader( if v.err != nil { return v.err } - provider := c.dbOpts.objProvider - objMeta, err := provider.Lookup(fileTypeTable, meta.FileBacking.DiskFileNum) - if err != nil { - return err - } - return fn(createCommonReader(v, meta, objMeta.IsShared())) + return fn(createCommonReader(v, meta)) } func (c *tableCacheContainer) withReader(meta physicalMeta, fn func(*sstable.Reader) error) error { @@ -430,7 +485,8 @@ func (c *tableCacheShard) newIters( opts *IterOptions, internalOpts internalIterOpts, dbOpts *tableCacheOpts, -) (internalIterator, keyspan.FragmentIterator, error) { + kinds iterKinds, +) (iterSet, error) { // TODO(sumeer): constructing the Reader should also use a plumbed context, // since parts of the sstable are read during the construction. The Reader // should not remember that context since the Reader can be long-lived. @@ -442,11 +498,64 @@ func (c *tableCacheShard) newIters( v := c.findNode(file, dbOpts) if v.err != nil { defer c.unrefValue(v) - return nil, nil, v.err + return iterSet{}, v.err + } + + // Note: This suffers an allocation for virtual sstables. + cr := createCommonReader(v, file) + var iters iterSet + var err error + if kinds.RangeKey() && file.HasRangeKeys { + iters.rangeKey, err = c.newRangeKeyIter(v, cr, opts.SpanIterOptions()) + } + if kinds.RangeDeletion() && file.HasPointKeys && err == nil { + iters.rangeDeletion, err = c.newRangeDelIter(ctx, file, cr, dbOpts) + } + if kinds.Point() && err == nil { + iters.point, err = c.newPointIter(ctx, v, file, cr, opts, internalOpts, dbOpts) + } + if err != nil { + // NB: There's a subtlety here: Because the point iterator is the last + // iterator we attempt to create, it's not possible for: + // err != nil && iters.point != nil + // If it were possible, we'd need to account for it to avoid double + // unref-ing here, once during CloseAll and once during `unrefValue`. + iters.CloseAll() + c.unrefValue(v) + return iterSet{}, err + } + // Only point iterators ever require the reader stay pinned in the cache. If + // we're not returning a point iterator to the caller, we need to unref v. + // There's an added subtlety that iters.point may be non-nil but not a true + // iterator that's ref'd the underlying reader if block filters excluded the + // entirety of the table. + // + // TODO(jackson): This `filteredAll` subtlety can be removed after the + // planned #2863 refactor, when there will be no need to return a special + // empty iterator type to signify that point keys were filtered. + if iters.point == nil || iters.point == filteredAll { + c.unrefValue(v) } + return iters, nil +} - hideObsoletePoints := false - var pointKeyFilters []BlockPropertyFilter +// newPointIter is an internal helper that constructs a point iterator over a +// sstable. This function is for internal use only, and callers should use +// newIters instead. 
+func (c *tableCacheShard) newPointIter( + ctx context.Context, + v *tableCacheValue, + file *manifest.FileMetadata, + cr sstable.CommonReader, + opts *IterOptions, + internalOpts internalIterOpts, + dbOpts *tableCacheOpts, +) (internalIterator, error) { + var ( + hideObsoletePoints bool = false + pointKeyFilters []BlockPropertyFilter + filterer *sstable.BlockPropertiesFilterer + ) if opts != nil { // This code is appending (at most one filter) in-place to // opts.PointKeyFilters even though the slice is shared for iterators in @@ -470,64 +579,24 @@ func (c *tableCacheShard) newIters( hideObsoletePoints, pointKeyFilters = v.reader.TryAddBlockPropertyFilterForHideObsoletePoints( opts.snapshotForHideObsoletePoints, file.LargestSeqNum, opts.PointKeyFilters) - } - ok := true - var filterer *sstable.BlockPropertiesFilterer - var err error - if opts != nil { + + var ok bool + var err error ok, filterer, err = c.checkAndIntersectFilters(v, opts.TableFilter, pointKeyFilters, internalOpts.boundLimitedFilter) - } - if err != nil { - c.unrefValue(v) - return nil, nil, err - } - - provider := dbOpts.objProvider - // Check if this file is a foreign file. - objMeta, err := provider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum) - if err != nil { - return nil, nil, err - } - - // Note: This suffers an allocation for virtual sstables. - cr := createCommonReader(v, file, objMeta.IsShared()) - - // NB: range-del iterator does not maintain a reference to the table, nor - // does it need to read from it after creation. - rangeDelIter, err := cr.NewRawRangeDelIter() - if err != nil { - c.unrefValue(v) - return nil, nil, err - } - - // Assert expected bounds in tests. - if invariants.Enabled && rangeDelIter != nil { - cmp := base.DefaultComparer.Compare - if dbOpts.opts.Comparer != nil { - cmp = dbOpts.opts.Comparer.Compare + if err != nil { + return nil, err + } else if !ok { + // Return an empty iterator. This iterator has no mutable state, so + // using a singleton is fine. We must return `filteredAll` instead + // of nil so that the returned iterator returns MaybeFilteredKeys() + // = true. + // + // TODO(jackson): This `filteredAll` subtlety can be removed after the + // planned #2863 refactor, when there will be no need to return a special + // empty iterator type to signify that point keys were filtered. + return filteredAll, err } - // TODO(radu): we should be using AssertBounds, but it currently fails in - // some cases (#3167). - rangeDelIter = keyspan.AssertUserKeyBounds( - rangeDelIter, file.SmallestPointKey.UserKey, file.LargestPointKey.UserKey, cmp, - ) - } - - if !ok { - c.unrefValue(v) - // Return an empty iterator. This iterator has no mutable state, so - // using a singleton is fine. - // NB: We still return the potentially non-empty rangeDelIter. This - // ensures the iterator observes the file's range deletions even if the - // block property filters exclude all the file's point keys. The range - // deletions may still delete keys lower in the LSM in files that DO - // match the active filters. - // - // The point iterator returned must implement the filteredIter - // interface, so that the level iterator surfaces file boundaries when - // range deletions are present. 
- return filteredAll, rangeDelIter, err } var iter sstable.Iterator @@ -538,16 +607,16 @@ func (c *tableCacheShard) newIters( } tableFormat, err := v.reader.TableFormat() if err != nil { - return nil, nil, err + return nil, err } var rp sstable.ReaderProvider if tableFormat >= sstable.TableFormatPebblev3 && v.reader.Properties.NumValueBlocks > 0 { rp = &tableCacheShardReaderProvider{c: c, file: file, dbOpts: dbOpts} } - if objMeta.IsShared() && v.reader.Properties.GlobalSeqNum != 0 { + if v.isShared && v.reader.Properties.GlobalSeqNum != 0 { if tableFormat < sstable.TableFormatPebblev4 { - return nil, nil, errors.New("pebble: shared ingested sstable has a lower table format than expected") + return nil, errors.New("pebble: shared ingested sstable has a lower table format than expected") } // The table is shared and ingested. hideObsoletePoints = true @@ -566,16 +635,11 @@ func (c *tableCacheShard) newIters( internalOpts.stats, categoryAndQoS, dbOpts.sstStatsCollector, rp) } if err != nil { - if rangeDelIter != nil { - _ = rangeDelIter.Close() - } - c.unrefValue(v) - return nil, nil, err + return nil, err } // NB: v.closeHook takes responsibility for calling unrefValue(v) here. Take // care to avoid introducing an allocation here by adding a closure. iter.SetCloseHook(v.closeHook) - c.iterCount.Add(1) dbOpts.iterCount.Add(1) if invariants.RaceEnabled { @@ -583,73 +647,56 @@ func (c *tableCacheShard) newIters( c.mu.iters[iter] = debug.Stack() c.mu.Unlock() } - return iter, rangeDelIter, nil + return iter, nil } -func (c *tableCacheShard) newRangeKeyIter( - file *manifest.FileMetadata, opts keyspan.SpanIterOptions, dbOpts *tableCacheOpts, +// newRangeDelIter is an internal helper that constructs an iterator over a +// sstable's range deletions. This function is for table-cache internal use +// only, and callers should use newIters instead. +func (c *tableCacheShard) newRangeDelIter( + ctx context.Context, file *manifest.FileMetadata, cr sstable.CommonReader, dbOpts *tableCacheOpts, ) (keyspan.FragmentIterator, error) { - // Calling findNode gives us the responsibility of decrementing v's - // refCount. If opening the underlying table resulted in error, then we - // decrement this straight away. Otherwise, we pass that responsibility to - // the sstable iterator, which decrements when it is closed. - v := c.findNode(file, dbOpts) - if v.err != nil { - defer c.unrefValue(v) - return nil, v.err + // NB: range-del iterator does not maintain a reference to the table, nor + // does it need to read from it after creation. + rangeDelIter, err := cr.NewRawRangeDelIter() + if err != nil { + return nil, err + } + // Assert expected bounds in tests. + if invariants.Enabled && rangeDelIter != nil { + cmp := base.DefaultComparer.Compare + if dbOpts.opts.Comparer != nil { + cmp = dbOpts.opts.Comparer.Compare + } + // TODO(radu): we should be using AssertBounds, but it currently fails in + // some cases (#3167). + rangeDelIter = keyspan.AssertUserKeyBounds( + rangeDelIter, file.SmallestPointKey.UserKey, file.LargestPointKey.UserKey, cmp, + ) } + return rangeDelIter, nil +} - ok := true - var err error +// newRangeKeyIter is an internal helper that constructs an iterator over a +// sstable's range keys. This function is for table-cache internal use only, and +// callers should use newIters instead. 
+func (c *tableCacheShard) newRangeKeyIter( + v *tableCacheValue, cr sstable.CommonReader, opts keyspan.SpanIterOptions, +) (keyspan.FragmentIterator, error) { // Don't filter a table's range keys if the file contains RANGEKEYDELs. // The RANGEKEYDELs may delete range keys in other levels. Skipping the // file's range key blocks may surface deleted range keys below. This is // done here, rather than deferring to the block-property collector in order // to maintain parity with point keys and the treatment of RANGEDELs. - if v.reader.Properties.NumRangeKeyDels == 0 { - ok, _, err = c.checkAndIntersectFilters(v, nil, opts.RangeKeyFilters, nil) - } - if err != nil { - c.unrefValue(v) - return nil, err - } - if !ok { - c.unrefValue(v) - // Return the empty iterator. This iterator has no mutable state, so - // using a singleton is fine. - return emptyKeyspanIter, err - } - - var iter keyspan.FragmentIterator - if file.Virtual { - provider := dbOpts.objProvider - var objMeta objstorage.ObjectMetadata - objMeta, err = provider.Lookup(fileTypeTable, file.FileBacking.DiskFileNum) - if err == nil { - virtualReader := sstable.MakeVirtualReader( - v.reader, file.VirtualMeta(), objMeta.IsShared(), - ) - iter, err = virtualReader.NewRawRangeKeyIter() + if v.reader.Properties.NumRangeKeyDels == 0 && len(opts.RangeKeyFilters) > 0 { + ok, _, err := c.checkAndIntersectFilters(v, nil, opts.RangeKeyFilters, nil) + if err != nil { + return nil, err + } else if !ok { + return nil, nil } - } else { - iter, err = v.reader.NewRawRangeKeyIter() } - - // iter is a block iter that holds the entire value of the block in memory. - // No need to hold onto a ref of the cache value. - c.unrefValue(v) - - if err != nil { - return nil, err - } - - if iter == nil { - // NewRawRangeKeyIter can return nil even if there's no error. However, - // the keyspan.LevelIter expects a non-nil iterator if err is nil. - return emptyKeyspanIter, nil - } - - return iter, nil + return cr.NewRawRangeKeyIter() } type tableCacheShardReaderProvider struct { @@ -1104,6 +1151,7 @@ type tableCacheValue struct { closeHook func(i sstable.Iterator) error reader *sstable.Reader err error + isShared bool loaded chan struct{} // Reference count for the value. The reader is closed when the reference // count drops to zero. @@ -1127,6 +1175,11 @@ func (v *tableCacheValue) load(loadInfo loadInfo, c *tableCacheShard, dbOpts *ta cacheOpts := private.SSTableCacheOpts(dbOpts.cacheID, loadInfo.backingFileNum).(sstable.ReaderOption) v.reader, err = sstable.NewReader(f, dbOpts.opts, cacheOpts, dbOpts.filterMetrics) } + if err == nil { + var objMeta objstorage.ObjectMetadata + objMeta, err = dbOpts.objProvider.Lookup(fileTypeTable, loadInfo.backingFileNum) + v.isShared = objMeta.IsShared() + } if err != nil { v.err = errors.Wrapf( err, "pebble: backing file %s error", loadInfo.backingFileNum) @@ -1225,3 +1278,75 @@ func (n *tableCacheNode) unlink() *tableCacheNode { n.links.next = n return next } + +// iterSet holds a set of iterators of various key kinds, all constructed over +// the same data structure (eg, an sstable). A subset of the fields may be +// populated depending on the `iterKinds` passed to newIters. +type iterSet struct { + point internalIterator + rangeDeletion keyspan.FragmentIterator + rangeKey keyspan.FragmentIterator +} + +// TODO(jackson): Consider adding methods for fast paths that check whether an +// iterator of a particular kind is nil, so that these call sites don't need to +// reach into the struct's fields directly. 
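To make the accessors below concrete, a short sketch of the intended consumption pattern, written as it would appear inside package pebble; consumeSpans and its process parameter are hypothetical.

// consumeSpans requests only the span iterators and never has to
// nil-check them, because the iterSet accessors substitute empty iterators.
func consumeSpans(
	ctx context.Context, newIters tableNewIters, file *manifest.FileMetadata,
	process func(rangeDel, rangeKey keyspan.FragmentIterator),
) error {
	iters, err := newIters(ctx, file, nil, internalIterOpts{}, iterRangeDeletions|iterRangeKeys)
	if err != nil {
		return err
	}
	process(iters.RangeDeletion(), iters.RangeKey())
	// CloseAll closes whatever was populated; per its contract, the
	// constituent iterators must not be closed individually afterwards.
	return iters.CloseAll()
}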
+
+// Point returns the contained point iterator. If there is no point iterator,
+// Point returns a non-nil empty point iterator.
+func (s *iterSet) Point() internalIterator {
+	if s.point == nil {
+		return emptyIter
+	}
+	return s.point
+}
+
+// RangeDeletion returns the contained range deletion iterator. If there is no
+// range deletion iterator, RangeDeletion returns a non-nil empty keyspan
+// iterator.
+func (s *iterSet) RangeDeletion() keyspan.FragmentIterator {
+	if s.rangeDeletion == nil {
+		return emptyKeyspanIter
+	}
+	return s.rangeDeletion
+}
+
+// RangeKey returns the contained range key iterator. If there is no range key
+// iterator, RangeKey returns a non-nil empty keyspan iterator.
+func (s *iterSet) RangeKey() keyspan.FragmentIterator {
+	if s.rangeKey == nil {
+		return emptyKeyspanIter
+	}
+	return s.rangeKey
+}
+
+// CloseAll closes all of the held iterators. If CloseAll is called, then Close
+// must not be called on the constituent iterators.
+func (s *iterSet) CloseAll() error {
+	var err error
+	if s.point != nil {
+		err = s.point.Close()
+	}
+	if s.rangeDeletion != nil {
+		err = firstError(err, s.rangeDeletion.Close())
+	}
+	if s.rangeKey != nil {
+		err = firstError(err, s.rangeKey.Close())
+	}
+	return err
+}
+
+// iterKinds is a bitmap indicating a set of kinds of iterators. Callers may
+// bitwise-OR iterPointKeys, iterRangeDeletions and/or iterRangeKeys together to
+// represent a set of desired iterator kinds.
+type iterKinds uint8
+
+func (t iterKinds) Point() bool         { return (t & iterPointKeys) != 0 }
+func (t iterKinds) RangeDeletion() bool { return (t & iterRangeDeletions) != 0 }
+func (t iterKinds) RangeKey() bool      { return (t & iterRangeKeys) != 0 }
+
+const (
+	iterPointKeys iterKinds = 1 << iota
+	iterRangeDeletions
+	iterRangeKeys
+)
diff --git a/vendor/github.com/cockroachdb/pebble/table_stats.go b/vendor/github.com/cockroachdb/pebble/table_stats.go
index f2afbd848c..7e8e8b6d80 100644
--- a/vendor/github.com/cockroachdb/pebble/table_stats.go
+++ b/vendor/github.com/cockroachdb/pebble/table_stats.go
@@ -910,23 +910,52 @@ func newCombinedDeletionKeyspanIter(
 			return nil, err
 		}
 		if iter != nil {
-			// Assert expected bounds in tests.
-			if invariants.Enabled {
-				// TODO(radu): we should be using AssertBounds, but it currently fails in
-				// some cases (#3167).
-				iter = keyspan.AssertUserKeyBounds(
-					iter, m.SmallestPointKey.UserKey, m.LargestPointKey.UserKey, comparer.Compare,
-				)
-			}
+			// Assert expected bounds. In previous versions of Pebble, range
+			// deletions persisted to sstables could exceed the bounds of the
+			// containing files due to "split user keys." This required readers to
+			// constrain the tombstones' bounds to the containing file at read time.
+			// See docs/range_deletions.md for an extended discussion of the design
+			// and invariants at that time.
+			//
+			// We've since compacted away all 'split user-keys' and in the process
+			// eliminated all "untruncated range tombstones" for physical sstables.
+			// We no longer need to perform truncation at read time for these
+			// sstables.
+			//
+			// At the same time, we've also introduced the concept of "virtual
+			// SSTables" where the file metadata's effective bounds can again be
+			// reduced to be narrower than the contained tombstones. These virtual
+			// SSTables handle truncation differently, performing it using
+			// keyspan.Truncate when the sstable's range deletion iterator is
+			// opened.
+			//
+			// Together, these mean that we should never see untruncated range
+			// tombstones any more, and the merging iterator no longer accounts for
+			// their existence. Since there's abundant subtlety that we're relying
+			// on, we choose to be conservative and assert that these invariants
+			// hold. We could (and previously did) choose to only validate these
+			// bounds in invariants builds, but the most likely avenue for these
+			// tombstones' existence is through a bug in a migration and old data
+			// sitting around in an old store from long ago.
+			//
+			// The table stats collector will read all files' range deletions
+			// asynchronously after Open, and provides a perfect opportunity to
+			// validate our invariants without harming user latency. We also
+			// previously performed truncation here which similarly required key
+			// comparisons, so replacing those key comparisons with assertions
+			// should be roughly similar in performance.
+			//
+			// TODO(jackson): Only use Assert[UserKey]Bounds in invariants builds
+			// in the following release.
+			//
+			// TODO(radu): we should be using AssertBounds, but it currently fails in
+			// some cases (#3167).
+			iter = keyspan.AssertUserKeyBounds(
+				iter, m.SmallestPointKey.UserKey, m.LargestPointKey.UserKey, comparer.Compare,
+			)
 			dIter := &keyspan.DefragmentingIter{}
 			dIter.Init(comparer, iter, equal, reducer, new(keyspan.DefragmentingBuffers))
 			iter = dIter
-			// Truncate tombstones to the containing file's bounds if necessary.
-			// See docs/range_deletions.md for why this is necessary.
-			iter = keyspan.Truncate(
-				comparer.Compare, iter, m.Smallest.UserKey, m.Largest.UserKey,
-				nil, nil, false, /* panicOnUpperTruncate */
-			)
 			mIter.AddLevel(iter)
 		}
diff --git a/vendor/github.com/cockroachdb/pebble/version_set.go b/vendor/github.com/cockroachdb/pebble/version_set.go
index b094385487..58b534c873 100644
--- a/vendor/github.com/cockroachdb/pebble/version_set.go
+++ b/vendor/github.com/cockroachdb/pebble/version_set.go
@@ -598,8 +598,8 @@ func (vs *versionSet) logAndApply(
 	if newManifestFileNum != 0 {
 		if vs.manifestFileNum != 0 {
 			vs.obsoleteManifests = append(vs.obsoleteManifests, fileInfo{
-				fileNum:  vs.manifestFileNum,
-				fileSize: prevManifestFileSize,
+				FileNum:  vs.manifestFileNum,
+				FileSize: prevManifestFileSize,
 			})
 		}
 		vs.manifestFileNum = newManifestFileNum
@@ -829,14 +829,14 @@ func (vs *versionSet) addObsoleteLocked(obsolete []*fileBacking) {
 	obsoleteFileInfo := make([]fileInfo, len(obsolete))
 	for i, bs := range obsolete {
-		obsoleteFileInfo[i].fileNum = bs.DiskFileNum
-		obsoleteFileInfo[i].fileSize = bs.Size
+		obsoleteFileInfo[i].FileNum = bs.DiskFileNum
+		obsoleteFileInfo[i].FileSize = bs.Size
 	}
 	if invariants.Enabled {
 		dedup := make(map[base.DiskFileNum]struct{})
 		for _, fi := range obsoleteFileInfo {
-			dedup[fi.fileNum] = struct{}{}
+			dedup[fi.FileNum] = struct{}{}
 		}
 		if len(dedup) != len(obsoleteFileInfo) {
 			panic("pebble: duplicate FileBacking present in obsolete list")
@@ -847,8 +847,8 @@
 		// Note that the obsolete tables are no longer zombie by the definition of
 		// zombie, but we leave them in the zombie tables map until they are
 		// deleted from disk.
-		if _, ok := vs.zombieTables[fi.fileNum]; !ok {
-			vs.opts.Logger.Fatalf("MANIFEST obsolete table %s not marked as zombie", fi.fileNum)
+		if _, ok := vs.zombieTables[fi.FileNum]; !ok {
+			vs.opts.Logger.Fatalf("MANIFEST obsolete table %s not marked as zombie", fi.FileNum)
 		}
 	}
@@ -868,7 +868,7 @@ func (vs *versionSet) updateObsoleteTableMetricsLocked() {
 	vs.metrics.Table.ObsoleteCount = int64(len(vs.obsoleteTables))
 	vs.metrics.Table.ObsoleteSize = 0
 	for _, fi := range vs.obsoleteTables {
-		vs.metrics.Table.ObsoleteSize += fi.fileSize
+		vs.metrics.Table.ObsoleteSize += fi.FileSize
 	}
 }
diff --git a/vendor/github.com/cockroachdb/pebble/log_recycler.go b/vendor/github.com/cockroachdb/pebble/wal/log_recycler.go
similarity index 58%
rename from vendor/github.com/cockroachdb/pebble/log_recycler.go
rename to vendor/github.com/cockroachdb/pebble/wal/log_recycler.go
index 3c01fde09a..d66483fafa 100644
--- a/vendor/github.com/cockroachdb/pebble/log_recycler.go
+++ b/vendor/github.com/cockroachdb/pebble/wal/log_recycler.go
@@ -2,7 +2,7 @@
 // of this source code is governed by a BSD-style license that can be found in
 // the LICENSE file.

-package pebble
+package wal

 import (
 	"sync"
@@ -11,7 +11,10 @@ import (
 	"github.com/cockroachdb/pebble/internal/base"
 )

-type logRecycler struct {
+// TODO(sumeer): hide LogRecycler once rest of Pebble is using wal.Manager.
+
+// LogRecycler recycles WAL log files.
+type LogRecycler struct {
 	// The maximum number of log files to maintain for recycling.
 	limit int

@@ -24,23 +27,40 @@
 	mu struct {
 		sync.Mutex

-		logs      []fileInfo
+		logs      []base.FileInfo
 		maxLogNum base.DiskFileNum
 	}
 }

-// add attempts to recycle the log file specified by logInfo. Returns true if
+// Init initializes the LogRecycler.
+func (r *LogRecycler) Init(maxNumLogFiles int) {
+	r.limit = maxNumLogFiles
+}
+
+// MinRecycleLogNum returns the current minimum log number that is allowed to
+// be recycled.
+func (r *LogRecycler) MinRecycleLogNum() base.DiskFileNum {
+	return r.minRecycleLogNum
+}
+
+// SetMinRecycleLogNum sets the minimum log number that is allowed to be
+// recycled.
+func (r *LogRecycler) SetMinRecycleLogNum(n base.DiskFileNum) {
+	r.minRecycleLogNum = n
+}
+
+// Add attempts to recycle the log file specified by logInfo. Returns true if
 // the log file should not be deleted (i.e. the log is being recycled), and
 // false otherwise.
-func (r *logRecycler) add(logInfo fileInfo) bool {
-	if logInfo.fileNum < r.minRecycleLogNum {
+func (r *LogRecycler) Add(logInfo base.FileInfo) bool {
+	if logInfo.FileNum < r.minRecycleLogNum {
 		return false
 	}
 	r.mu.Lock()
 	defer r.mu.Unlock()

-	if logInfo.fileNum <= r.mu.maxLogNum {
+	if logInfo.FileNum <= r.mu.maxLogNum {
 		// The log file number was already considered for recycling. Don't consider
 		// it again. This avoids a race between adding the same log file for
 		// recycling multiple times, and removing the log file for actual
@@ -50,7 +70,7 @@
 		// shouldn't be deleted.
 		return true
 	}
-	r.mu.maxLogNum = logInfo.fileNum
+	r.mu.maxLogNum = logInfo.FileNum
 	if len(r.mu.logs) >= r.limit {
 		return false
 	}
@@ -58,52 +78,66 @@
 	return true
 }

-// peek returns the log at the head of the recycling queue, or the zero value
+// Peek returns the log at the head of the recycling queue, or the zero value
 // fileInfo and false if the queue is empty.
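Taken together, the exported surface supports a recycle-or-delete decision along these lines; retireLog and its fs/dir parameters are hypothetical wiring inside package wal, while Add, base.FileInfo, and base.MakeFilename come from this diff.

// retireLog either queues an obsolete log file for reuse or removes it.
// Assumes imports of "github.com/cockroachdb/pebble/internal/base" and
// "github.com/cockroachdb/pebble/vfs".
func retireLog(r *LogRecycler, fs vfs.FS, dir string, fi base.FileInfo) error {
	if r.Add(fi) {
		// Queued for recycling: keep the file on disk for reuse.
		return nil
	}
	// Below the recycler's minimum log number, or the queue is full.
	return fs.Remove(fs.PathJoin(dir, base.MakeFilename(base.FileTypeLog, fi.FileNum)))
}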
-func (r *logRecycler) peek() (fileInfo, bool) {
+func (r *LogRecycler) Peek() (base.FileInfo, bool) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if len(r.mu.logs) == 0 {
-		return fileInfo{}, false
+		return base.FileInfo{}, false
 	}
 	return r.mu.logs[0], true
 }

-func (r *logRecycler) stats() (count int, size uint64) {
+// Stats returns current stats.
+func (r *LogRecycler) Stats() (count int, size uint64) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	count = len(r.mu.logs)
 	for i := 0; i < count; i++ {
-		size += r.mu.logs[i].fileSize
+		size += r.mu.logs[i].FileSize
 	}
 	return count, size
 }

-// pop removes the log number at the head of the recycling queue, enforcing
+// Pop removes the log number at the head of the recycling queue, enforcing
 // that it matches the specified logNum. An error is returned if the recycling
 // queue is empty or the head log number does not match the specified one.
-func (r *logRecycler) pop(logNum base.DiskFileNum) error {
+func (r *LogRecycler) Pop(logNum base.DiskFileNum) error {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if len(r.mu.logs) == 0 {
 		return errors.New("pebble: log recycler empty")
 	}
-	if r.mu.logs[0].fileNum != logNum {
+	if r.mu.logs[0].FileNum != logNum {
 		return errors.Errorf("pebble: log recycler invalid %d vs %v", logNum, errors.Safe(fileInfoNums(r.mu.logs)))
 	}
 	r.mu.logs = r.mu.logs[1:]
 	return nil
 }

-func fileInfoNums(finfos []fileInfo) []base.DiskFileNum {
+// LogNumsForTesting returns the current set of recyclable logs.
+func (r *LogRecycler) LogNumsForTesting() []base.DiskFileNum {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return fileInfoNums(r.mu.logs)
+}
+
+func (r *LogRecycler) maxLogNumForTesting() base.DiskFileNum {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return r.mu.maxLogNum
+}
+
+func fileInfoNums(finfos []base.FileInfo) []base.DiskFileNum {
 	if len(finfos) == 0 {
 		return nil
 	}
 	nums := make([]base.DiskFileNum, len(finfos))
 	for i := range finfos {
-		nums[i] = finfos[i].fileNum
+		nums[i] = finfos[i].FileNum
 	}
 	return nums
 }
diff --git a/vendor/github.com/cockroachdb/pebble/wal/wal.go b/vendor/github.com/cockroachdb/pebble/wal/wal.go
new file mode 100644
index 0000000000..d9d08456e3
--- /dev/null
+++ b/vendor/github.com/cockroachdb/pebble/wal/wal.go
@@ -0,0 +1,185 @@
+// Copyright 2024 The LevelDB-Go and Pebble Authors. All rights reserved. Use
+// of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+package wal
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/cockroachdb/pebble/internal/base"
+	"github.com/cockroachdb/pebble/record"
+	"github.com/cockroachdb/pebble/vfs"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// TODO(sumeer): write a high-level comment describing the approach.
+
+// Dir is used for storing log files.
+type Dir struct {
+	FS      vfs.FS
+	Dirname string
+}
+
+// NumWAL is the number of the virtual WAL. It can map to one or more physical
+// log files. In standalone mode, it will map to exactly one log file. In
+// failover mode, it can map to many log files, which are totally ordered
+// (using a dense logIndex).
+//
+// In general, WAL refers to the virtual WAL, and file refers to a log file.
+// The Pebble MANIFEST only knows about virtual WALs and assigns numbers to
+// them. Additional mapping to one or more files happens in this package. If a
+// WAL maps to multiple files, the source of truth regarding that mapping is
+// the contents of the directories.
+type NumWAL base.DiskFileNum
+
+// logIndex numbers log files within a WAL.
+type logIndex uint32
+
+// TODO(sumeer): parsing func.
And remove attempts to parse log files outside
+// the wal package (including tools).
+
+// makeLogFilename makes a log filename.
+func makeLogFilename(wn NumWAL, index logIndex) string {
+	if index == 0 {
+		// Use a backward compatible name, for simplicity.
+		return base.MakeFilename(base.FileTypeLog, base.DiskFileNum(wn))
+	}
+	return fmt.Sprintf("%s-%03d.log", base.DiskFileNum(wn).String(), index)
+}
+
+// Options provides configuration for the Manager.
+type Options struct {
+	// Primary dir for storing WAL files.
+	Primary Dir
+	// Secondary is used for failover. Optional.
+	Secondary Dir
+
+	// Recycling configuration. Only files in the primary dir are recycled.
+
+	// MinRecycleLogNum is the minimum log file number that is allowed to be
+	// recycled. Log file numbers smaller than this will be deleted. This is
+	// used to prevent recycling a log written by a previous instance of the DB
+	// which may not have had log recycling enabled.
+	MinRecycleLogNum base.DiskFileNum
+	// MaxNumRecyclableLogs is the maximum number of log files to maintain for
+	// recycling.
+	MaxNumRecyclableLogs int
+
+	// SyncingFileOpts is the configuration used when calling vfs.NewSyncingFile.
+	SyncingFileOpts vfs.SyncingFileOptions
+
+	// MinSyncInterval is documented in Options.WALMinSyncInterval.
+	MinSyncInterval func() time.Duration
+	// FsyncLatency records fsync latency. This doesn't differentiate between
+	// fsyncs on the primary and secondary dir.
+	//
+	// TODO(sumeer): consider separating out into two histograms.
+	FsyncLatency prometheus.Histogram
+	// QueueSemChan is the channel to pop from when popping from queued records
+	// that have requested a sync. Its original purpose was to function as a
+	// semaphore that prevents the record.LogWriter.flusher.syncQueue from
+	// overflowing (which will cause a panic). It is still useful in that role
+	// when the WALManager is configured in standalone mode. In failover mode
+	// there is no syncQueue, so the pushback into the commit pipeline is
+	// unnecessary, but possibly harmless.
+	QueueSemChan chan struct{}
+}
+
+// Stats exposes stats used in Pebble metrics.
+type Stats struct {
+	// ObsoleteFileCount is the number of obsolete log files.
+	ObsoleteFileCount int
+	// ObsoleteFileSize is the total size of obsolete log files.
+	ObsoleteFileSize uint64
+	// LiveFileCount is the number of live log files.
+	LiveFileCount int
+	// LiveFileSize is the total size of live log files. This can be higher than
+	// LiveSize due to log recycling (a live log file may be larger than the
+	// size used in its latest incarnation), or failover (resulting in multiple
+	// log files containing the same records).
+	LiveFileSize uint64
+	// LiveSize is the total size of the live data in log files.
+	LiveSize uint64
+}
+
+// Manager handles all WAL work.
+//
+// It is an interface for now, but if we end up with a single implementation
+// for both standalone mode and failover mode, we will get rid of the
+// interface.
+type Manager interface {
+	// Init initializes the Manager.
+	//
+	// Implementation notes:
+	// - lists and stats the directories, so that Stats are up to date (assuming
+	//   no obsolete files yet), and the list of WALs and their constituent log
+	//   files is initialized.
+	// - ensures dirs are created and synced.
+	Init(o Options) error
+	// List returns the virtual WALs in ascending order.
+	List() ([]NumWAL, error)
+	// Delete deletes all virtual WALs up to highestObsoleteNum. The
+	// underlying physical WAL files may be recycled.
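	// A hypothetical caller sketch for the method below: once the DB
	// knows its minimum unflushed WAL, everything older can be retired
	// in one call,
	//
	//	if err := mgr.Delete(minUnflushedWAL - 1); err != nil {
	//		return err
	//	}
	//
	// where mgr is a Manager and minUnflushedWAL is the lowest NumWAL
	// still needed; Delete may recycle rather than remove the files.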
+ Delete(highestObsoleteNum NumWAL) error + // OpenForRead opens a virtual WAL for read. + OpenForRead(wn NumWAL, strictWALTail bool) (Reader, error) + // Create creates a new virtual WAL. + // + // NumWALs passed to successive Create calls must be monotonically + // increasing, and be greater than any NumWAL seen earlier. The caller must + // close the previous Writer before calling Create. + Create(wn NumWAL) (Writer, error) + // Stats returns the latest Stats. + Stats() Stats + // Close the manager. + Close() error +} + +// SyncOptions has non-nil Done and Err when fsync is requested, else both are +// nil. +type SyncOptions struct { + Done *sync.WaitGroup + Err *error +} + +// Writer writes to a virtual WAL. A Writer in standalone mode maps to a +// single record.LogWriter. In failover mode, it can failover across multiple +// physical log files. +type Writer interface { + // Size based on writes. + Size() uint64 + // FileSize is the size of the file(s) underlying this WAL. FileSize + // >= Size because of recycling and failover. This is an estimate. + FileSize() uint64 + // WriteRecord writes a complete record. The record is asynchronously + // persisted to the underlying writer. If SyncOptions.Done != nil, the wait + // group will be notified when durability is guaranteed or an error has + // occurred (set in SyncOptions.Err). External synchronisation provided by + // commitPipeline.mu guarantees that WriteRecord calls are serialized. + WriteRecord(p []byte, opts SyncOptions) error + // Close the writer. + Close() error + // Metrics must be called after Close. The callee will no longer modify the + // returned LogWriterMetrics. + Metrics() *record.LogWriterMetrics +} + +// Reader reads a virtual WAL. +type Reader interface { + // NextRecord returns the next record, or error. + NextRecord() (io.Reader, error) + // LogicalOffset is the monotonically increasing offset in the WAL. When the + // WAL corresponds to a single log file, this is the offset in that log + // file. + LogicalOffset() int64 + // Close the reader. + Close() error +} + +// Make lint happy. +var _ logIndex = 0 +var _ = makeLogFilename diff --git a/vendor/github.com/ncruces/go-strftime/.gitignore b/vendor/github.com/ncruces/go-strftime/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/ncruces/go-strftime/LICENSE b/vendor/github.com/ncruces/go-strftime/LICENSE new file mode 100644 index 0000000000..7f0f5534ca --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Nuno Cruces + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ncruces/go-strftime/README.md b/vendor/github.com/ncruces/go-strftime/README.md new file mode 100644 index 0000000000..5b0573cf01 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/README.md @@ -0,0 +1,5 @@ +# `strftime`/`strptime` compatible time formatting and parsing for Go + +[![Go Reference](https://pkg.go.dev/badge/image)](https://pkg.go.dev/github.com/ncruces/go-strftime) +[![Go Report](https://goreportcard.com/badge/github.com/ncruces/go-strftime)](https://goreportcard.com/report/github.com/ncruces/go-strftime) +[![Go Coverage](https://github.com/ncruces/go-strftime/wiki/coverage.svg)](https://raw.githack.com/wiki/ncruces/go-strftime/coverage.html) \ No newline at end of file diff --git a/vendor/github.com/ncruces/go-strftime/parser.go b/vendor/github.com/ncruces/go-strftime/parser.go new file mode 100644 index 0000000000..b006de38a8 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/parser.go @@ -0,0 +1,107 @@ +package strftime + +import "unicode/utf8" + +type parser struct { + format func(spec, flag byte) error + literal func(byte) error +} + +func (p *parser) parse(fmt string) error { + const ( + initial = iota + percent + flagged + modified + ) + + var flag, modifier byte + var err error + state := initial + start := 0 + for i, b := range []byte(fmt) { + switch state { + default: + if b == '%' { + state = percent + start = i + continue + } + err = p.literal(b) + + case percent: + if b == '-' || b == ':' { + state = flagged + flag = b + continue + } + if b == 'E' || b == 'O' { + state = modified + modifier = b + flag = 0 + continue + } + err = p.format(b, 0) + state = initial + + case flagged: + if b == 'E' || b == 'O' { + state = modified + modifier = b + continue + } + err = p.format(b, flag) + state = initial + + case modified: + if okModifier(modifier, b) { + err = p.format(b, flag) + } else { + err = p.literals(fmt[start : i+1]) + } + state = initial + } + + if err != nil { + if err, ok := err.(formatError); ok { + err.setDirective(fmt, start, i) + return err + } + return err + } + } + + if state != initial { + return p.literals(fmt[start:]) + } + return nil +} + +func (p *parser) literals(literal string) error { + for _, b := range []byte(literal) { + if err := p.literal(b); err != nil { + return err + } + } + return nil +} + +type literalErr string + +func (e literalErr) Error() string { + return "strftime: unsupported literal: " + string(e) +} + +type formatError struct { + message string + directive string +} + +func (e formatError) Error() string { + return "strftime: unsupported directive: " + e.directive + " " + e.message +} + +func (e *formatError) setDirective(str string, i, j int) { + _, n := utf8.DecodeRuneInString(str[j:]) + e.directive = str[i : j+n] +} diff --git a/vendor/github.com/ncruces/go-strftime/pkg.go b/vendor/github.com/ncruces/go-strftime/pkg.go new file mode 100644 index 0000000000..9a3bbd327f --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/pkg.go @@ -0,0 +1,96 @@ +/* +Package strftime provides strftime/strptime compatible time 
formatting and parsing. + +The following specifiers are available: + + Date (Year, Month, Day): + %Y - Year with century (can be negative, 4 digits at least) + -0001, 0000, 1995, 2009, 14292, etc. + %C - year / 100 (round down, 20 in 2009) + %y - year % 100 (00..99) + + %m - Month of the year, zero-padded (01..12) + %-m no-padded (1..12) + %B - Full month name (January) + %b - Abbreviated month name (Jan) + %h - Equivalent to %b + + %d - Day of the month, zero-padded (01..31) + %-d no-padded (1..31) + %e - Day of the month, blank-padded ( 1..31) + + %j - Day of the year (001..366) + %-j no-padded (1..366) + + Time (Hour, Minute, Second, Subsecond): + %H - Hour of the day, 24-hour clock, zero-padded (00..23) + %-H no-padded (0..23) + %k - Hour of the day, 24-hour clock, blank-padded ( 0..23) + %I - Hour of the day, 12-hour clock, zero-padded (01..12) + %-I no-padded (1..12) + %l - Hour of the day, 12-hour clock, blank-padded ( 1..12) + %P - Meridian indicator, lowercase (am or pm) + %p - Meridian indicator, uppercase (AM or PM) + + %M - Minute of the hour (00..59) + %-M no-padded (0..59) + + %S - Second of the minute (00..60) + %-S no-padded (0..60) + + %L - Millisecond of the second (000..999) + %f - Microsecond of the second (000000..999999) + %N - Nanosecond of the second (000000000..999999999) + + Time zone: + %z - Time zone as hour and minute offset from UTC (e.g. +0900) + %:z - hour and minute offset from UTC with a colon (e.g. +09:00) + %Z - Time zone abbreviation (e.g. MST) + + Weekday: + %A - Full weekday name (Sunday) + %a - Abbreviated weekday name (Sun) + %u - Day of the week (Monday is 1, 1..7) + %w - Day of the week (Sunday is 0, 0..6) + + ISO 8601 week-based year and week number: + Week 1 of YYYY starts with a Monday and includes YYYY-01-04. + The days in the year before the first week are in the last week of + the previous year. + %G - Week-based year + %g - Last 2 digits of the week-based year (00..99) + %V - Week number of the week-based year (01..53) + %-V no-padded (1..53) + + Week number: + Week 1 of YYYY starts with a Sunday or Monday (according to %U or %W). + The days in the year before the first week are in week 0. + %U - Week number of the year. The week starts with Sunday. (00..53) + %-U no-padded (0..53) + %W - Week number of the year. The week starts with Monday. (00..53) + %-W no-padded (0..53) + + Seconds since the Unix Epoch: + %s - Number of seconds since 1970-01-01 00:00:00 UTC. + %Q - Number of milliseconds since 1970-01-01 00:00:00 UTC. + + Literal string: + %n - Newline character (\n) + %t - Tab character (\t) + %% - Literal % character + + Combination: + %c - date and time (%a %b %e %T %Y) + %D - Date (%m/%d/%y) + %F - ISO 8601 date format (%Y-%m-%d) + %v - VMS date (%e-%b-%Y) + %x - Same as %D + %X - Same as %T + %r - 12-hour time (%I:%M:%S %p) + %R - 24-hour time (%H:%M) + %T - 24-hour time (%H:%M:%S) + %+ - date(1) (%a %b %e %H:%M:%S %Z %Y) + +The modifiers ``E'' and ``O'' are ignored. 
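+ +A minimal usage sketch (illustrative only; t is any time.Time and the +variable names are not part of the API): + + s := strftime.Format("%Y-%m-%d %H:%M:%S", t) // e.g. "2009-11-10 23:00:00" + layout, _ := strftime.Layout("%Y-%m-%d") // "2006-01-02", usable with Go's time package + tm, _ := strftime.Parse("%Y-%m-%d", "2024-02-02") // strptime-style parsing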
+*/ +package strftime diff --git a/vendor/github.com/ncruces/go-strftime/specifiers.go b/vendor/github.com/ncruces/go-strftime/specifiers.go new file mode 100644 index 0000000000..065f779633 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/specifiers.go @@ -0,0 +1,241 @@ +package strftime + +import "strings" + +// https://strftime.org/ +func goLayout(spec, flag byte, parsing bool) string { + switch spec { + default: + return "" + + case 'B': + return "January" + case 'b', 'h': + return "Jan" + case 'm': + if flag == '-' || parsing { + return "1" + } + return "01" + case 'A': + return "Monday" + case 'a': + return "Mon" + case 'e': + return "_2" + case 'd': + if flag == '-' || parsing { + return "2" + } + return "02" + case 'j': + if flag == '-' { + if parsing { + return "__2" + } + return "" + } + return "002" + case 'I': + if flag == '-' || parsing { + return "3" + } + return "03" + case 'H': + if flag == '-' && !parsing { + return "" + } + return "15" + case 'M': + if flag == '-' || parsing { + return "4" + } + return "04" + case 'S': + if flag == '-' || parsing { + return "5" + } + return "05" + case 'y': + return "06" + case 'Y': + return "2006" + case 'p': + return "PM" + case 'P': + return "pm" + case 'Z': + return "MST" + case 'z': + if flag == ':' { + if parsing { + return "Z07:00" + } + return "-07:00" + } + if parsing { + return "Z0700" + } + return "-0700" + + case '+': + if parsing { + return "Mon Jan _2 15:4:5 MST 2006" + } + return "Mon Jan _2 15:04:05 MST 2006" + case 'c': + if parsing { + return "Mon Jan _2 15:4:5 2006" + } + return "Mon Jan _2 15:04:05 2006" + case 'v': + return "_2-Jan-2006" + case 'F': + if parsing { + return "2006-1-2" + } + return "2006-01-02" + case 'D', 'x': + if parsing { + return "1/2/06" + } + return "01/02/06" + case 'r': + if parsing { + return "3:4:5 PM" + } + return "03:04:05 PM" + case 'T', 'X': + if parsing { + return "15:4:5" + } + return "15:04:05" + case 'R': + if parsing { + return "15:4" + } + return "15:04" + + case '%': + return "%" + case 't': + return "\t" + case 'n': + return "\n" + } +} + +// https://nsdateformatter.com/ +func uts35Pattern(spec, flag byte) string { + switch spec { + default: + return "" + + case 'B': + return "MMMM" + case 'b', 'h': + return "MMM" + case 'm': + if flag == '-' { + return "M" + } + return "MM" + case 'A': + return "EEEE" + case 'a': + return "E" + case 'd': + if flag == '-' { + return "d" + } + return "dd" + case 'j': + if flag == '-' { + return "D" + } + return "DDD" + case 'I': + if flag == '-' { + return "h" + } + return "hh" + case 'H': + if flag == '-' { + return "H" + } + return "HH" + case 'M': + if flag == '-' { + return "m" + } + return "mm" + case 'S': + if flag == '-' { + return "s" + } + return "ss" + case 'y': + return "yy" + case 'Y': + return "yyyy" + case 'g': + return "YY" + case 'G': + return "YYYY" + case 'V': + if flag == '-' { + return "w" + } + return "ww" + case 'p': + return "a" + case 'Z': + return "zzz" + case 'z': + if flag == ':' { + return "xxx" + } + return "xx" + case 'L': + return "SSS" + case 'f': + return "SSSSSS" + case 'N': + return "SSSSSSSSS" + + case '+': + return "E MMM d HH:mm:ss zzz yyyy" + case 'c': + return "E MMM d HH:mm:ss yyyy" + case 'v': + return "d-MMM-yyyy" + case 'F': + return "yyyy-MM-dd" + case 'D', 'x': + return "MM/dd/yy" + case 'r': + return "hh:mm:ss a" + case 'T', 'X': + return "HH:mm:ss" + case 'R': + return "HH:mm" + + case '%': + return "%" + case 't': + return "\t" + case 'n': + return "\n" + } +} + +// 
http://man.he.net/man3/strftime +func okModifier(mod, spec byte) bool { + if mod == 'E' { + return strings.Contains("cCxXyY", string(spec)) + } + if mod == 'O' { + return strings.Contains("deHImMSuUVwWy", string(spec)) + } + return false +} diff --git a/vendor/github.com/ncruces/go-strftime/strftime.go b/vendor/github.com/ncruces/go-strftime/strftime.go new file mode 100644 index 0000000000..5308ef7727 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/strftime.go @@ -0,0 +1,324 @@ +package strftime + +import ( + "bytes" + "strconv" + "time" +) + +// Format returns a textual representation of the time value +// formatted according to the strftime format specification. +func Format(fmt string, t time.Time) string { + buf := buffer(fmt) + return string(AppendFormat(buf, fmt, t)) +} + +// AppendFormat is like Format, but appends the textual representation +// to dst and returns the extended buffer. +func AppendFormat(dst []byte, fmt string, t time.Time) []byte { + var parser parser + + parser.literal = func(b byte) error { + dst = append(dst, b) + return nil + } + + parser.format = func(spec, flag byte) error { + switch spec { + case 'A': + dst = append(dst, t.Weekday().String()...) + return nil + case 'a': + dst = append(dst, t.Weekday().String()[:3]...) + return nil + case 'B': + dst = append(dst, t.Month().String()...) + return nil + case 'b', 'h': + dst = append(dst, t.Month().String()[:3]...) + return nil + case 'm': + dst = appendInt2(dst, int(t.Month()), flag) + return nil + case 'd': + dst = appendInt2(dst, int(t.Day()), flag) + return nil + case 'e': + dst = appendInt2(dst, int(t.Day()), ' ') + return nil + case 'I': + dst = append12Hour(dst, t, flag) + return nil + case 'l': + dst = append12Hour(dst, t, ' ') + return nil + case 'H': + dst = appendInt2(dst, t.Hour(), flag) + return nil + case 'k': + dst = appendInt2(dst, t.Hour(), ' ') + return nil + case 'M': + dst = appendInt2(dst, t.Minute(), flag) + return nil + case 'S': + dst = appendInt2(dst, t.Second(), flag) + return nil + case 'L': + dst = append(dst, t.Format(".000")[1:]...) + return nil + case 'f': + dst = append(dst, t.Format(".000000")[1:]...) + return nil + case 'N': + dst = append(dst, t.Format(".000000000")[1:]...) 
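+ // Note: the %L/%f/%N cases above piggyback on Go's fractional-second + // layouts (".000", ".000000", ".000000000") and strip the leading dot.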
+ return nil + case 'y': + dst = t.AppendFormat(dst, "06") + return nil + case 'Y': + dst = t.AppendFormat(dst, "2006") + return nil + case 'C': + dst = t.AppendFormat(dst, "2006") + dst = dst[:len(dst)-2] + return nil + case 'U': + dst = appendWeekNumber(dst, t, flag, true) + return nil + case 'W': + dst = appendWeekNumber(dst, t, flag, false) + return nil + case 'V': + _, w := t.ISOWeek() + dst = appendInt2(dst, w, flag) + return nil + case 'g': + y, _ := t.ISOWeek() + dst = year(y).AppendFormat(dst, "06") + return nil + case 'G': + y, _ := t.ISOWeek() + dst = year(y).AppendFormat(dst, "2006") + return nil + case 's': + dst = strconv.AppendInt(dst, t.Unix(), 10) + return nil + case 'Q': + dst = strconv.AppendInt(dst, t.UnixMilli(), 10) + return nil + case 'w': + w := t.Weekday() + dst = appendInt1(dst, int(w)) + return nil + case 'u': + if w := t.Weekday(); w == 0 { + dst = append(dst, '7') + } else { + dst = appendInt1(dst, int(w)) + } + return nil + case 'j': + if flag == '-' { + dst = strconv.AppendInt(dst, int64(t.YearDay()), 10) + } else { + dst = t.AppendFormat(dst, "002") + } + return nil + } + + if layout := goLayout(spec, flag, false); layout != "" { + dst = t.AppendFormat(dst, layout) + return nil + } + + dst = append(dst, '%') + if flag != 0 { + dst = append(dst, flag) + } + dst = append(dst, spec) + return nil + } + + parser.parse(fmt) + return dst +} + +// Parse converts a textual representation of time to the time value it represents +// according to the strptime format specification. +func Parse(fmt, value string) (time.Time, error) { + pattern, err := layout(fmt, true) + if err != nil { + return time.Time{}, err + } + return time.Parse(pattern, value) +} + +// Layout converts a strftime format specification +// to a Go time pattern specification. +func Layout(fmt string) (string, error) { + return layout(fmt, false) +} + +func layout(fmt string, parsing bool) (string, error) { + dst := buffer(fmt) + var parser parser + + parser.literal = func(b byte) error { + if '0' <= b && b <= '9' { + return literalErr(b) + } + dst = append(dst, b) + if b == 'M' || b == 'T' || b == 'm' || b == 'n' { + switch { + case bytes.HasSuffix(dst, []byte("Jan")): + return literalErr("Jan") + case bytes.HasSuffix(dst, []byte("Mon")): + return literalErr("Mon") + case bytes.HasSuffix(dst, []byte("MST")): + return literalErr("MST") + case bytes.HasSuffix(dst, []byte("PM")): + return literalErr("PM") + case bytes.HasSuffix(dst, []byte("pm")): + return literalErr("pm") + } + } + return nil + } + + parser.format = func(spec, flag byte) error { + if layout := goLayout(spec, flag, parsing); layout != "" { + dst = append(dst, layout...) + return nil + } + + switch spec { + default: + return formatError{} + + case 'L', 'f', 'N': + if bytes.HasSuffix(dst, []byte(".")) || bytes.HasSuffix(dst, []byte(",")) { + switch spec { + default: + dst = append(dst, "000"...) + case 'f': + dst = append(dst, "000000"...) + case 'N': + dst = append(dst, "000000000"...) + } + return nil + } + return formatError{message: "must follow '.' or ','"} + } + } + + if err := parser.parse(fmt); err != nil { + return "", err + } + return string(dst), nil +} + +// UTS35 converts a strftime format specification +// to a Unicode Technical Standard #35 Date Format Pattern. 
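+// For example, UTS35("%Y-%m-%dT%H:%M:%S") yields "yyyy-MM-dd'T'HH:mm:ss": +// specifiers map per uts35Pattern, and ASCII letters that appear literally in +// the input are single-quoted so they are not misread as pattern fields.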
+func UTS35(fmt string) (string, error) { + const quote = '\'' + var quoted bool + dst := buffer(fmt) + + var parser parser + + parser.literal = func(b byte) error { + if b == quote { + dst = append(dst, quote, quote) + return nil + } + if !quoted && ('a' <= b && b <= 'z' || 'A' <= b && b <= 'Z') { + dst = append(dst, quote) + quoted = true + } + dst = append(dst, b) + return nil + } + + parser.format = func(spec, flag byte) error { + if quoted { + dst = append(dst, quote) + quoted = false + } + if pattern := uts35Pattern(spec, flag); pattern != "" { + dst = append(dst, pattern...) + return nil + } + return formatError{} + } + + if err := parser.parse(fmt); err != nil { + return "", err + } + if quoted { + dst = append(dst, quote) + } + return string(dst), nil +} + +func buffer(format string) (buf []byte) { + const bufSize = 64 + max := len(format) + 10 + if max < bufSize { + var b [bufSize]byte + buf = b[:0] + } else { + buf = make([]byte, 0, max) + } + return +} + +func year(y int) time.Time { + return time.Date(y, time.January, 1, 0, 0, 0, 0, time.UTC) +} + +func appendWeekNumber(dst []byte, t time.Time, flag byte, sunday bool) []byte { + offset := int(t.Weekday()) + if sunday { + offset = 6 - offset + } else if offset != 0 { + offset = 7 - offset + } + return appendInt2(dst, (t.YearDay()+offset)/7, flag) +} + +func append12Hour(dst []byte, t time.Time, flag byte) []byte { + h := t.Hour() + if h == 0 { + h = 12 + } else if h > 12 { + h -= 12 + } + return appendInt2(dst, h, flag) +} + +func appendInt1(dst []byte, i int) []byte { + return append(dst, byte('0'+i)) +} + +func appendInt2(dst []byte, i int, flag byte) []byte { + if flag == 0 || i >= 10 { + return append(dst, smallsString[i*2:i*2+2]...) + } + if flag == ' ' { + dst = append(dst, flag) + } + return appendInt1(dst, i) +} + +const smallsString = "" + + "00010203040506070809" + + "10111213141516171819" + + "20212223242526272829" + + "30313233343536373839" + + "40414243444546474849" + + "50515253545556575859" + + "60616263646566676869" + + "70717273747576777879" + + "80818283848586878889" + + "90919293949596979899" diff --git a/vendor/modernc.org/libc/libc.go b/vendor/modernc.org/libc/libc.go index 4680259392..46288a8f49 100644 --- a/vendor/modernc.org/libc/libc.go +++ b/vendor/modernc.org/libc/libc.go @@ -1176,6 +1176,10 @@ func Xlog10(t *TLS, x float64) float64 { return math.Log10(x) } +func X__builtin_log2(t *TLS, x float64) float64 { + return Xlog2(t, x) +} + func Xlog2(t *TLS, x float64) float64 { if __ccgo_strace { trc("t=%v x=%v, (%v:)", t, x, origin(2)) @@ -2382,27 +2386,26 @@ func Xrintf(tls *TLS, x float32) (r float32) { return y } -func X__builtin_lrintf(tls *TLS, x float32) (r int64) { +func X__builtin_lrintf(tls *TLS, x float32) (r long) { return Xlrintf(tls, x) } - -func Xlrintf(tls *TLS, x float32) (r int64) { +func Xlrintf(tls *TLS, x float32) (r long) { if __ccgo_strace { trc("tls=%v x=%v, (%v:)", tls, x, origin(2)) defer func() { trc("-> %v", r) }() } - return int64(Xrintf(tls, x)) + return long(Xrintf(tls, x)) } -func X__builtin_lrint(tls *TLS, x float64) (r int64) { +func X__builtin_lrint(tls *TLS, x float64) (r long) { return Xlrint(tls, x) } -func Xlrint(tls *TLS, x float64) (r int64) { +func Xlrint(tls *TLS, x float64) (r long) { if __ccgo_strace { trc("tls=%v x=%v, (%v:)", tls, x, origin(2)) defer func() { trc("-> %v", r) }() } - return int64(Xrint(tls, x)) + return long(Xrint(tls, x)) } diff --git a/vendor/modernc.org/libc/libc_darwin.go b/vendor/modernc.org/libc/libc_darwin.go index d26e7c9a51..7533b9af1b 
100644 --- a/vendor/modernc.org/libc/libc_darwin.go +++ b/vendor/modernc.org/libc/libc_darwin.go @@ -1475,15 +1475,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { } return resolved_path } - -// struct tm *gmtime_r(const time_t *timep, struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_freebsd.go b/vendor/modernc.org/libc/libc_freebsd.go index 2497d69422..f3c9dcc071 100644 --- a/vendor/modernc.org/libc/libc_freebsd.go +++ b/vendor/modernc.org/libc/libc_freebsd.go @@ -1256,14 +1256,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { return resolved_path } -// struct tm *gmtime_r(const time_t *timep, struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_illumos.go b/vendor/modernc.org/libc/libc_illumos.go index 66704a5194..5224f4722e 100644 --- a/vendor/modernc.org/libc/libc_illumos.go +++ b/vendor/modernc.org/libc/libc_illumos.go @@ -1337,14 +1337,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { return resolved_path } -// struct tm *gmtime_r(const time_t *timep, struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_linux.go b/vendor/modernc.org/libc/libc_linux.go index 9638a87f1f..98e9c6373f 100644 --- a/vendor/modernc.org/libc/libc_linux.go +++ b/vendor/modernc.org/libc/libc_linux.go @@ -1271,14 +1271,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { return resolved_path } -// struct tm *gmtime_r(const time_t *timep, struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_netbsd.go b/vendor/modernc.org/libc/libc_netbsd.go index c7c495daa6..9a152d8d3a 100644 --- a/vendor/modernc.org/libc/libc_netbsd.go +++ b/vendor/modernc.org/libc/libc_netbsd.go @@ -1291,14 +1291,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { return resolved_path } -// struct tm *gmtime_r(const time_t *timep, struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_openbsd.go b/vendor/modernc.org/libc/libc_openbsd.go index 3e1becf83f..e93c65bdf2 100644 --- a/vendor/modernc.org/libc/libc_openbsd.go +++ b/vendor/modernc.org/libc/libc_openbsd.go @@ -1291,14 +1291,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr { return resolved_path } -// struct tm *gmtime_r(const time_t *timep, 
struct tm *result); -func Xgmtime_r(t *TLS, timep, result uintptr) uintptr { - if __ccgo_strace { - trc("t=%v result=%v, (%v:)", t, result, origin(2)) - } - panic(todo("")) -} - // char *inet_ntoa(struct in_addr in); func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr { if __ccgo_strace { diff --git a/vendor/modernc.org/libc/libc_unix.go b/vendor/modernc.org/libc/libc_unix.go index 722e68f217..f2088f25d3 100644 --- a/vendor/modernc.org/libc/libc_unix.go +++ b/vendor/modernc.org/libc/libc_unix.go @@ -24,10 +24,12 @@ import ( "time" "unsafe" + "github.com/ncruces/go-strftime" guuid "github.com/google/uuid" "golang.org/x/sys/unix" "modernc.org/libc/errno" "modernc.org/libc/grp" + "modernc.org/libc/limits" "modernc.org/libc/poll" "modernc.org/libc/pwd" "modernc.org/libc/signal" @@ -1240,3 +1242,144 @@ func Xsysctlbyname(t *TLS, name, oldp, oldlenp, newp uintptr, newlen types.Size_ // func init() { // defaultZone = addObject(newMallocZone(true)) // } + +// /tmp/libc/musl-master/src/time/gmtime.c:6:19: +var _tm ctime.Tm + +// /tmp/libc/musl-master/src/time/gmtime.c:4:11: +func Xgmtime(tls *TLS, t uintptr) (r uintptr) { // /tmp/libc/musl-master/src/time/gmtime.c:7:2: + if __ccgo_strace { + trc("tls=%v t=%v, (%v:)", tls, t, origin(2)) + defer func() { trc("-> %v", r) }() + } + return Xgmtime_r(tls, t, uintptr(unsafe.Pointer(&_tm))) +} + + +var _days_in_month = [12]int8{ + 0: int8(31), + 1: int8(30), + 2: int8(31), + 3: int8(30), + 4: int8(31), + 5: int8(31), + 6: int8(30), + 7: int8(31), + 8: int8(30), + 9: int8(31), + 10: int8(31), + 11: int8(29), +} + +var x___utc = [4]int8{'U', 'T', 'C'} + +func Xstrftime(tls *TLS, s uintptr, n size_t, f uintptr, tm uintptr) (r size_t) { + if __ccgo_strace { + trc("tls=%v s=%v n=%v f=%v tm=%v, (%v:)", tls, s, n, f, tm, origin(2)) + defer func() { trc("-> %v", r) }() + } + t := Xmktime(tls, tm) + fmt := GoString(f) + var result string + if fmt != "" { + result = strftime.Format(fmt, time.Unix(int64(t), 0)) + } + switch r = size_t(len(result)); { + case r > n: + r = 0 + default: + copy((*RawMem)(unsafe.Pointer(s))[:r:r], result) + *(*byte)(unsafe.Pointer(s + uintptr(r))) = 0 + } + return r + +} + +func Xgmtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr) { + if __ccgo_strace { + trc("tls=%v t=%v tm=%v, (%v:)", tls, t, tm, origin(2)) + defer func() { trc("-> %v", r) }() + } + if x___secs_to_tm(tls, int64(*(*time_t)(unsafe.Pointer(t))), tm) < 0 { + *(*int32)(unsafe.Pointer(X__errno_location(tls))) = int32(errno.EOVERFLOW) + return uintptr(0) + } + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_isdst = 0 + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_gmtoff = 0 + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_zone = uintptr(unsafe.Pointer(&x___utc)) + return tm +} + +func x___secs_to_tm(tls *TLS, t int64, tm uintptr) (r int32) { + var c_cycles, leap, months, q_cycles, qc_cycles, remdays, remsecs, remyears, wday, yday int32 + var days, secs, years int64 + _, _, _, _, _, _, _, _, _, _, _, _, _ = c_cycles, days, leap, months, q_cycles, qc_cycles, remdays, remsecs, remyears, secs, wday, yday, years + /* Reject time_t values whose year would overflow int */ + if t < int64(-Int32FromInt32(1)-Int32FromInt32(0x7fffffff))*Int64FromInt64(31622400) || t > Int64FromInt32(limits.INT_MAX)*Int64FromInt64(31622400) { + return -int32(1) + } + secs = t - (Int64FromInt64(946684800) + int64(Int32FromInt32(86400)*(Int32FromInt32(31)+Int32FromInt32(29)))) + days = secs / int64(86400) + remsecs = int32(secs % int64(86400)) + if remsecs < 0 { + remsecs += int32(86400) + days-- + } + wday = int32((int64(3) + days) % 
int64(7)) + if wday < 0 { + wday += int32(7) + } + qc_cycles = int32(days / int64(Int32FromInt32(365)*Int32FromInt32(400)+Int32FromInt32(97))) + remdays = int32(days % int64(Int32FromInt32(365)*Int32FromInt32(400)+Int32FromInt32(97))) + if remdays < 0 { + remdays += Int32FromInt32(365)*Int32FromInt32(400) + Int32FromInt32(97) + qc_cycles-- + } + c_cycles = remdays / (Int32FromInt32(365)*Int32FromInt32(100) + Int32FromInt32(24)) + if c_cycles == int32(4) { + c_cycles-- + } + remdays -= c_cycles * (Int32FromInt32(365)*Int32FromInt32(100) + Int32FromInt32(24)) + q_cycles = remdays / (Int32FromInt32(365)*Int32FromInt32(4) + Int32FromInt32(1)) + if q_cycles == int32(25) { + q_cycles-- + } + remdays -= q_cycles * (Int32FromInt32(365)*Int32FromInt32(4) + Int32FromInt32(1)) + remyears = remdays / int32(365) + if remyears == int32(4) { + remyears-- + } + remdays -= remyears * int32(365) + leap = BoolInt32(!(remyears != 0) && (q_cycles != 0 || !(c_cycles != 0))) + yday = remdays + int32(31) + int32(28) + leap + if yday >= int32(365)+leap { + yday -= int32(365) + leap + } + years = int64(remyears+int32(4)*q_cycles+int32(100)*c_cycles) + int64(400)*int64(int64(qc_cycles)) + months = 0 + for { + if !(int32(_days_in_month[months]) <= remdays) { + break + } + remdays -= int32(_days_in_month[months]) + goto _1 + _1: + months++ + } + if months >= int32(10) { + months -= int32(12) + years++ + } + if years+int64(100) > int64(limits.INT_MAX) || years+int64(100) < int64(-Int32FromInt32(1)-Int32FromInt32(0x7fffffff)) { + return -int32(1) + } + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_year = int32(years + int64(100)) + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_mon = months + int32(2) + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_mday = remdays + int32(1) + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_wday = wday + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_yday = yday + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_hour = remsecs / int32(3600) + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_min = remsecs / int32(60) % int32(60) + (*ctime.Tm)(unsafe.Pointer(tm)).Ftm_sec = remsecs % int32(60) + return 0 +} diff --git a/vendor/modernc.org/libc/libc_windows.go b/vendor/modernc.org/libc/libc_windows.go index 0790d60935..48cda9f870 100644 --- a/vendor/modernc.org/libc/libc_windows.go +++ b/vendor/modernc.org/libc/libc_windows.go @@ -161,6 +161,7 @@ var ( procSleepEx = modkernel32.NewProc("SleepEx") procSystemTimeToFileTime = modkernel32.NewProc("SystemTimeToFileTime") procTerminateThread = modkernel32.NewProc("TerminateThread") + procTryEnterCriticalSection = modkernel32.NewProc("TryEnterCriticalSection") procUnlockFile = modkernel32.NewProc("UnlockFile") procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") procWaitForSingleObjectEx = modkernel32.NewProc("WaitForSingleObjectEx") @@ -2942,6 +2943,22 @@ func XEnterCriticalSection(t *TLS, lpCriticalSection uintptr) { syscall.Syscall(procEnterCriticalSection.Addr(), 1, lpCriticalSection, 0, 0) } +// BOOL TryEnterCriticalSection( +// +// LPCRITICAL_SECTION lpCriticalSection +// +// ); +func XTryEnterCriticalSection(t *TLS, lpCriticalSection uintptr) (r int32) { + if __ccgo_strace { + trc("t=%v lpCriticalSection=%v, (%v:)", t, lpCriticalSection, origin(2)) + } + r0, _, err := syscall.SyscallN(procTryEnterCriticalSection.Addr(), lpCriticalSection) + if err != 0 { + t.setErrno(err) + } + return int32(r0) +} + // void LeaveCriticalSection( // // LPCRITICAL_SECTION lpCriticalSection @@ -7406,3 +7423,7 @@ func X_stat64i32(t *TLS, path uintptr, buffer uintptr) int32 { } return int32(r0) } + +func AtomicLoadNUint8(ptr uintptr, 
memorder int32) uint8 { + return byte(a_load_8(ptr)) +} diff --git a/vendor/modernc.org/libc/libc_windows_386.go b/vendor/modernc.org/libc/libc_windows_386.go index dd3fe87aee..5865b0ac67 100644 --- a/vendor/modernc.org/libc/libc_windows_386.go +++ b/vendor/modernc.org/libc/libc_windows_386.go @@ -684,3 +684,29 @@ func X_fstat(t *TLS, fd int32, buffer uintptr) int32 { return 0 } + +func Xstrspn(tls *TLS, s uintptr, c uintptr) size_t { /* strspn.c:6:8: */ + if __ccgo_strace { + trc("tls=%v s=%v c=%v, (%v:)", tls, s, c, origin(2)) + } + bp := tls.Alloc(32) + defer tls.Free(32) + + var a uintptr = s + *(*[8]size_t)(unsafe.Pointer(bp /* byteset */)) = [8]size_t{0: size_t(0)} + + if !(int32(*(*int8)(unsafe.Pointer(c))) != 0) { + return size_t(0) + } + if !(int32(*(*int8)(unsafe.Pointer(c + 1))) != 0) { + for ; int32(*(*int8)(unsafe.Pointer(s))) == int32(*(*int8)(unsafe.Pointer(c))); s++ { + } + return size_t((int32(s) - int32(a)) / 1) + } + + for ; *(*int8)(unsafe.Pointer(c)) != 0 && AssignOrPtrUint32(bp+uintptr(size_t(*(*uint8)(unsafe.Pointer(c)))/(uint32(8)*uint32(unsafe.Sizeof(size_t(0)))))*4, size_t(size_t(1))<<(size_t(*(*uint8)(unsafe.Pointer(c)))%(uint32(8)*uint32(unsafe.Sizeof(size_t(0)))))) != 0; c++ { + } + for ; *(*int8)(unsafe.Pointer(s)) != 0 && *(*size_t)(unsafe.Pointer(bp + uintptr(size_t(*(*uint8)(unsafe.Pointer(s)))/(uint32(8)*uint32(unsafe.Sizeof(size_t(0)))))*4))&(size_t(size_t(1))<<(size_t(*(*uint8)(unsafe.Pointer(s)))%(uint32(8)*uint32(unsafe.Sizeof(size_t(0)))))) != 0; s++ { + } + return size_t((int32(s) - int32(a)) / 1) +} diff --git a/vendor/modernc.org/libc/libc_windows_amd64.go b/vendor/modernc.org/libc/libc_windows_amd64.go index 16af3e1ce5..81d67a6956 100644 --- a/vendor/modernc.org/libc/libc_windows_amd64.go +++ b/vendor/modernc.org/libc/libc_windows_amd64.go @@ -601,3 +601,29 @@ func XDefWindowProcW(t *TLS, _ ...interface{}) int64 { func XSendMessageTimeoutW(t *TLS, _ ...interface{}) int64 { panic(todo("")) } + +func Xstrspn(tls *TLS, s uintptr, c uintptr) size_t { /* strspn.c:6:8: */ + if __ccgo_strace { + trc("tls=%v s=%v c=%v, (%v:)", tls, s, c, origin(2)) + } + bp := tls.Alloc(32) + defer tls.Free(32) + + var a uintptr = s + *(*[4]size_t)(unsafe.Pointer(bp /* byteset */)) = [4]size_t{0: uint64(0)} + + if !(int32(*(*int8)(unsafe.Pointer(c))) != 0) { + return uint64(0) + } + if !(int32(*(*int8)(unsafe.Pointer(c + 1))) != 0) { + for ; int32(*(*int8)(unsafe.Pointer(s))) == int32(*(*int8)(unsafe.Pointer(c))); s++ { + } + return size_t((int64(s) - int64(a)) / 1) + } + + for ; *(*int8)(unsafe.Pointer(c)) != 0 && AssignOrPtrUint64(bp+uintptr(size_t(*(*uint8)(unsafe.Pointer(c)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8, size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(c)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; c++ { + } + for ; *(*int8)(unsafe.Pointer(s)) != 0 && *(*size_t)(unsafe.Pointer(bp + uintptr(size_t(*(*uint8)(unsafe.Pointer(s)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8))&(size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(s)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; s++ { + } + return size_t((int64(s) - int64(a)) / 1) +} diff --git a/vendor/modernc.org/libc/libc_windows_arm64.go b/vendor/modernc.org/libc/libc_windows_arm64.go index 16af3e1ce5..81d67a6956 100644 --- a/vendor/modernc.org/libc/libc_windows_arm64.go +++ b/vendor/modernc.org/libc/libc_windows_arm64.go @@ -601,3 +601,29 @@ func XDefWindowProcW(t *TLS, _ ...interface{}) int64 { func XSendMessageTimeoutW(t *TLS, _ ...interface{}) int64 { panic(todo("")) } + 
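+// Xstrspn returns the length of the initial segment of s consisting only of +// bytes from c (a ccgo translation of musl's strspn, per the strspn.c marker +// below), using a 256-bit byteset built in the 32 bytes allocated from the +// TLS stack.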
+func Xstrspn(tls *TLS, s uintptr, c uintptr) size_t { /* strspn.c:6:8: */ + if __ccgo_strace { + trc("tls=%v s=%v c=%v, (%v:)", tls, s, c, origin(2)) + } + bp := tls.Alloc(32) + defer tls.Free(32) + + var a uintptr = s + *(*[4]size_t)(unsafe.Pointer(bp /* byteset */)) = [4]size_t{0: uint64(0)} + + if !(int32(*(*int8)(unsafe.Pointer(c))) != 0) { + return uint64(0) + } + if !(int32(*(*int8)(unsafe.Pointer(c + 1))) != 0) { + for ; int32(*(*int8)(unsafe.Pointer(s))) == int32(*(*int8)(unsafe.Pointer(c))); s++ { + } + return size_t((int64(s) - int64(a)) / 1) + } + + for ; *(*int8)(unsafe.Pointer(c)) != 0 && AssignOrPtrUint64(bp+uintptr(size_t(*(*uint8)(unsafe.Pointer(c)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8, size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(c)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; c++ { + } + for ; *(*int8)(unsafe.Pointer(s)) != 0 && *(*size_t)(unsafe.Pointer(bp + uintptr(size_t(*(*uint8)(unsafe.Pointer(s)))/(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))*8))&(size_t(uint64(1))<<(size_t(*(*uint8)(unsafe.Pointer(s)))%(uint64(8)*uint64(unsafe.Sizeof(size_t(0)))))) != 0; s++ { + } + return size_t((int64(s) - int64(a)) / 1) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 349c6c4a27..58ea1f72f9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/CortexFoundation/robot/backend # github.com/CortexFoundation/statik v0.0.0-20210315012922-8bb8a7b5dc66 ## explicit; go 1.16 github.com/CortexFoundation/statik -# github.com/CortexFoundation/torrentfs v1.0.59-0.20240129113837-7f5c3c53b617 +# github.com/CortexFoundation/torrentfs v1.0.59-0.20240202155446-7354cfa88cb3 ## explicit; go 1.21 github.com/CortexFoundation/torrentfs github.com/CortexFoundation/torrentfs/backend @@ -74,7 +74,7 @@ github.com/CortexFoundation/wormhole # github.com/DataDog/zstd v1.5.6-0.20230622172052-ea68dcab66c0 ## explicit; go 1.14 github.com/DataDog/zstd -# github.com/RoaringBitmap/roaring v1.8.0 +# github.com/RoaringBitmap/roaring v1.9.0 ## explicit; go 1.14 github.com/RoaringBitmap/roaring github.com/RoaringBitmap/roaring/BitSliceIndexing @@ -193,10 +193,10 @@ github.com/anacrolix/upnp # github.com/anacrolix/utp v0.2.0 ## explicit; go 1.20 github.com/anacrolix/utp -# github.com/antlabs/stl v0.0.1 +# github.com/antlabs/stl v0.0.2 ## explicit; go 1.13 github.com/antlabs/stl/list -# github.com/antlabs/timer v0.0.12 +# github.com/antlabs/timer v0.1.1 ## explicit; go 1.19 github.com/antlabs/timer # github.com/apapsch/go-jsonmerge/v2 v2.0.0 @@ -264,7 +264,7 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding # github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/route53 v1.37.0 +# github.com/aws/aws-sdk-go-v2/service/route53 v1.37.1 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/route53 github.com/aws/aws-sdk-go-v2/service/route53/internal/customizations @@ -342,7 +342,7 @@ github.com/cespare/xxhash/v2 # github.com/charmbracelet/bubbletea v0.25.0 ## explicit; go 1.17 github.com/charmbracelet/bubbletea -# github.com/cloudflare/cloudflare-go v0.86.0 +# github.com/cloudflare/cloudflare-go v0.87.0 ## explicit; go 1.19 github.com/cloudflare/cloudflare-go # github.com/cockroachdb/errors v1.11.1 @@ -369,7 +369,7 @@ github.com/cockroachdb/errors/withstack # github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b ## explicit; go 1.16 github.com/cockroachdb/logtags -# 
github.com/cockroachdb/pebble v0.0.0-20240123194302-5b280af78f31 +# github.com/cockroachdb/pebble v0.0.0-20240202151741-904a6c99689c ## explicit; go 1.21 github.com/cockroachdb/pebble github.com/cockroachdb/pebble/batchrepr @@ -406,6 +406,7 @@ github.com/cockroachdb/pebble/record github.com/cockroachdb/pebble/sstable github.com/cockroachdb/pebble/vfs github.com/cockroachdb/pebble/vfs/atomicfs +github.com/cockroachdb/pebble/wal # github.com/cockroachdb/redact v1.1.5 ## explicit; go 1.14 github.com/cockroachdb/redact @@ -621,7 +622,7 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240125082051-42cd04596328 +# github.com/google/pprof v0.0.0-20240130152714-0ed6a68c8d9e ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 @@ -688,7 +689,7 @@ github.com/influxdata/line-protocol # github.com/jackpal/go-nat-pmp v1.0.2 ## explicit github.com/jackpal/go-nat-pmp -# github.com/jedib0t/go-pretty/v6 v6.5.3 +# github.com/jedib0t/go-pretty/v6 v6.5.4 ## explicit; go 1.17 github.com/jedib0t/go-pretty/v6/progress github.com/jedib0t/go-pretty/v6/text @@ -786,6 +787,9 @@ github.com/naoina/go-stringutil ## explicit github.com/naoina/toml github.com/naoina/toml/ast +# github.com/ncruces/go-strftime v0.1.9 +## explicit; go 1.17 +github.com/ncruces/go-strftime # github.com/nutsdb/nutsdb v1.0.4-0.20240108030426-a4ca1b95b3d6 ## explicit; go 1.18 github.com/nutsdb/nutsdb @@ -999,7 +1003,7 @@ github.com/ucwong/filecache # github.com/ucwong/go-ttlmap v1.0.2-0.20221020173635-331e7ddde2bb ## explicit; go 1.19 github.com/ucwong/go-ttlmap -# github.com/ucwong/golang-kv v1.0.24-0.20240123222739-ef750e9bde39 +# github.com/ucwong/golang-kv v1.0.24-0.20240202153641-e1b6e9221f93 ## explicit; go 1.21 github.com/ucwong/golang-kv github.com/ucwong/golang-kv/badger @@ -1236,7 +1240,7 @@ gopkg.in/urfave/cli.v1 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# modernc.org/libc v1.40.8 +# modernc.org/libc v1.40.13 ## explicit; go 1.20 modernc.org/libc modernc.org/libc/errno