diff --git a/go.mod b/go.mod index b0de6461ad5..63b2f0e7b94 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/hashicorp/memberlist v0.5.0 github.com/json-iterator/go v1.1.12 github.com/lib/pq v1.10.7 - github.com/minio/minio-go/v7 v7.0.37 + github.com/minio/minio-go/v7 v7.0.45 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -47,8 +47,8 @@ require ( github.com/sony/gobreaker v0.5.0 github.com/spf13/afero v1.9.3 github.com/stretchr/testify v1.8.1 - github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 - github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d + github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 + github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae go.etcd.io/etcd/api/v3 v3.5.6 @@ -108,7 +108,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.4.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd // indirect + github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect @@ -170,7 +170,7 @@ require ( github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.7.1 // indirect + github.com/prometheus/exporter-toolkit v0.7.3 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.8.2 // indirect diff --git a/go.sum b/go.sum index ee2d19e9cc1..7a6933a402c 100644 --- a/go.sum +++ b/go.sum @@ -254,10 +254,9 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/core v1.0.0-rc.0 h1:jJoA0N+C4/knWYVZ6GrdHOtDyrg8Y/TR4vFpTaqTsqs= +github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 h1:rydBwnBoywKQMjWF0z8SriYtQ+uUcaFsxuijMjJr5PI= +github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI= github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd h1:svR6KxSP1xiPw10RN4Pd7g6BAVkEcNN628PAqZH31mM= -github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= @@ -706,8 +705,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= 
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8= -github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= +github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -848,8 +847,9 @@ github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.7.3 h1:IYBn0CTGi/nYxstdTUKysuSofUNJ3DQW3FmZ/Ub6rgU= +github.com/prometheus/exporter-toolkit v0.7.3/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -934,10 +934,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 h1:9dceDSKKsLWNHjrMpyzK1t7eVcAZv9Dp3FX+uokUS2Y= -github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604/go.mod h1:Vx5dZs9ElxEhNLnum/OgB0pNTqNdI2zdXL82BeJr3T4= -github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d h1:cfGwZH4LBrkFbQHea7HUlNDOQhILGBRPxDWjy4FhAME= -github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d/go.mod h1:odqdxSO+o/UaVgNpdkYYaQUW/JpT7LByXyZmxoe6uoc= +github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 h1:xUnLk2CwIoJyv6OB4MWC3aOH9TnneSgM5kgTsOmXIuI= +github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06/go.mod h1:gdo4vwwonBnheHB/TCwAOUtKJKrLhLtbBVTQR9rN/v0= +github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 h1:NRu4DwMAHgI+AJ0eevwQ8mVgrT15KskHOIKtQ0T3/EE= +github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5/go.mod h1:2bDlu7xW2TXrSt/TGBozMMVECjjrnqoLJKc/ZIiPngA= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache 
v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1044,7 +1044,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= @@ -1370,7 +1369,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 859499862f8..680ad43a6e0 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -475,6 +475,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro store.WithIndexCache(u.indexCache), store.WithQueryGate(u.queryGate), store.WithChunkPool(u.chunksPool), + store.WithSeriesBatchSize(store.SeriesBatchSize), } if u.logLevel.String() == "debug" { bucketStoreOpts = append(bucketStoreOpts, store.WithDebugLogging()) diff --git a/vendor/github.com/efficientgo/core/COPYRIGHT b/vendor/github.com/efficientgo/core/COPYRIGHT new file mode 100644 index 00000000000..0b803fd346d --- /dev/null +++ b/vendor/github.com/efficientgo/core/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The EfficientGo Authors. +Licensed under the Apache License 2.0. 
\ No newline at end of file diff --git a/vendor/github.com/efficientgo/tools/core/LICENSE b/vendor/github.com/efficientgo/core/LICENSE similarity index 100% rename from vendor/github.com/efficientgo/tools/core/LICENSE rename to vendor/github.com/efficientgo/core/LICENSE diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go b/vendor/github.com/efficientgo/core/errcapture/do.go similarity index 89% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go rename to vendor/github.com/efficientgo/core/errcapture/do.go index c7176a475b2..40f9eb9d7eb 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go +++ b/vendor/github.com/efficientgo/core/errcapture/do.go @@ -10,11 +10,10 @@ package errcapture import ( "io" - "io/ioutil" "os" - "github.com/efficientgo/tools/core/pkg/merrors" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/merrors" ) type doFunc func() error @@ -38,7 +37,7 @@ func Do(err *error, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with error capture but exhausts the reader before. func ExhaustClose(err *error, r io.ReadCloser, format string, a ...interface{}) { - _, copyErr := io.Copy(ioutil.Discard, r) + _, copyErr := io.Copy(io.Discard, r) Do(err, r.Close, format, a...) diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go b/vendor/github.com/efficientgo/core/errcapture/doc.go similarity index 71% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go rename to vendor/github.com/efficientgo/core/errcapture/doc.go index 96c1c22918f..ab7f2ed82ba 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go +++ b/vendor/github.com/efficientgo/core/errcapture/doc.go @@ -1,29 +1,43 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package errcapture - +// Package errcapture implements robust error handling in defer statements using provided return argument. +// // Close a `io.Closer` interface or execute any function that returns error safely while capturing error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `errcapture` provides utility functions to capture error and add to provided one, // still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer errcapture.Do(&err, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer errcapture.Do(&err, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `errcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File) (err error) { +// defer errcapture.Do(&err, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The errcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. 
This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/logerrcapture if you want to just log an error instead. +package errcapture diff --git a/vendor/github.com/efficientgo/core/errors/doc.go b/vendor/github.com/efficientgo/core/errors/doc.go new file mode 100644 index 00000000000..c489b2e2f27 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/doc.go @@ -0,0 +1,13 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package errors provides basic utilities to manipulate errors with a useful stacktrace. It combines the +// benefits of errors.New and fmt.Errorf world into a single package. It is an improved and minimal version of the popular +// https://github.com/pkg/errors (archived) package allowing reliable wrapping of errors with stacktrace. Unfortunately, +// the standard library recommended error wrapping using `%+w` is prone to errors and does not support stacktraces. +package errors diff --git a/vendor/github.com/efficientgo/core/errors/errors.go b/vendor/github.com/efficientgo/core/errors/errors.go new file mode 100644 index 00000000000..ab032cfe105 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/errors.go @@ -0,0 +1,168 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "errors" + "fmt" + "strings" +) + +// base is the fundamental struct that implements the error interface and the acts as the backbone of this errors package. +type base struct { + // info contains the error message passed through calls like errors.Wrap, errors.New. + info string + // stacktrace stores information about the program counters - i.e. where this error was generated. + stack stacktrace + // err is the actual error which is being wrapped with a stacktrace and message information. + err error +} + +// Error implements the error interface. +func (b *base) Error() string { + if b.err != nil { + return fmt.Sprintf("%s: %s", b.info, b.err.Error()) + } + return b.info +} + +// Unwrap implements the error Unwrap interface. +func (b *base) Unwrap() error { + return b.err +} + +// Format implements the fmt.Formatter interface to support the formatting of an error chain with "%+v" verb. +// Whenever error is printed with %+v format verb, stacktrace info gets dumped to the output. +func (b *base) Format(s fmt.State, verb rune) { + if verb == 'v' && s.Flag('+') { + s.Write([]byte(formatErrorChain(b))) + return + } + + s.Write([]byte(b.Error())) +} + +// New returns a new error with a stacktrace of recent call frames. Each call to New +// returns a distinct error value even if the text is identical. An alternative of +// the errors.New function from github.com/pkg/errors. +func New(message string) error { + return &base{ + info: message, + stack: newStackTrace(), + err: nil, + } +} + +// Newf is like New, but it formats input according to a format specifier. +// An alternative of the fmt.Errorf function. 
+// +// If no args have been passed, it is same as `New` function without formatting. Character like +// '%' still has to be escaped in that scenario. +func Newf(format string, args ...interface{}) error { + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: nil, + } +} + +// Wrap returns a new error, which wraps another error with a stacktrace containing recent call frames. +// +// If cause is nil, it does not produce the error, similar to errors.Wrap from github.com/pkg/errors was doing. +// Still, avoid `errors.Wrap(nil, "...") patterns as it can lead to inefficiencies while constructing arguments +// to format as well readability issues. We keep this behaviour to make sure it's a drop-in replacement. +func Wrap(cause error, message string) error { + if cause == nil { + return nil + } + + return &base{ + info: message, + stack: newStackTrace(), + err: cause, + } +} + +// Wrapf is like Wrap but the message is formatted with the supplied format specifier. +// +// If no args have been passed, it is same as `Wrap` function without formatting. +// Character like '%' still has to be escaped in that scenario. +func Wrapf(cause error, format string, args ...interface{}) error { + if cause == nil { + return nil + } + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: cause, + } +} + +// Cause returns the result of repeatedly calling the Unwrap method on err, if err's +// type implements an Unwrap method. Otherwise, Cause returns the last encountered error. +// The difference between Unwrap and Cause is the first one performs unwrapping of one level +// but Cause returns the last err (whether it's nil or not) where it failed to assert +// the interface containing the Unwrap method. +// This is a replacement of errors.Cause without the causer interface from pkg/errors which +// actually can be sufficed through the errors.Is function. But considering some use cases +// where we need to peel off all the external layers applied through errors.Wrap family, +// it is useful ( where external SDK doesn't use errors.Is internally). +func Cause(err error) error { + for err != nil { + e, ok := err.(interface { + Unwrap() error + }) + if !ok { + return err + } + err = e.Unwrap() + } + return nil +} + +// formatErrorChain formats an error chain. +func formatErrorChain(err error) string { + var buf strings.Builder + for err != nil { + if e, ok := err.(*base); ok { + buf.WriteString(fmt.Sprintf("%s\n%v", e.info, e.stack)) + err = e.err + } else { + buf.WriteString(fmt.Sprintf("%s\n", err.Error())) + err = nil + } + } + return buf.String() +} + +// The functions `Is`, `As` & `Unwrap` provides a thin wrapper around the builtin errors +// package in go. Just for sake of completeness and correct autocompletion behaviors from +// IDEs they have been wrapped using functions instead of using variable to reference them +// as first class functions (eg: var Is = errros.Is ). + +// Is is a wrapper of built-in errors.Is. It reports whether any error in err's +// chain matches target. The chain consists of err itself followed by the sequence +// of errors obtained by repeatedly calling Unwrap. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As is a wrapper of built-in errors.As. It finds the first error in err's +// chain that matches target, and if one is found, sets target to that error +// value and returns true. Otherwise, it returns false. 
+func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// Unwrap is a wrapper of built-in errors.Unwrap. Unwrap returns the result of +// calling the Unwrap method on err, if err's type contains an Unwrap method +// returning error. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} diff --git a/vendor/github.com/efficientgo/core/errors/stacktrace.go b/vendor/github.com/efficientgo/core/errors/stacktrace.go new file mode 100644 index 00000000000..a479977d812 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/stacktrace.go @@ -0,0 +1,58 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "fmt" + "runtime" + "strings" +) + +// stacktrace holds a snapshot of program counters. +type stacktrace []uintptr + +// newStackTrace captures a stack trace. It skips first 3 frames to record the +// snapshot of the stack trace at the origin of a particular error. It tries to +// record maximum 16 frames (if available). +func newStackTrace() stacktrace { + const stackDepth = 16 // record maximum 16 frames (if available). + + pc := make([]uintptr, stackDepth) + // using skip=3 for not to count the program counter address of + // 1. the respective function from errors package (eg. errors.New) + // 2. newStacktrace itself + // 3. the function used in runtime.Callers + n := runtime.Callers(3, pc) + + // this approach is taken to reduce long term memory footprint (obtained through escape analysis). + // We are returning a new slice by re-slicing the pc with the required length and capacity (when the + // no of returned callFrames is less that stackDepth). This uses less memory compared to pc[:n] as + // the capacity of new slice is inherited from the parent slice if not specified. + return pc[:n:n] +} + +// String implements the fmt.Stringer interface to provide formatted text output. +func (s stacktrace) String() string { + var buf strings.Builder + + // CallersFrames takes the slice of Program Counter addresses returned by Callers to + // retrieve function/file/line information. + cf := runtime.CallersFrames(s) + for { + // more indicates if the next call will be successful or not. + frame, more := cf.Next() + // used formatting scheme <`>`space><:> for example: + // > testing.tRunner /home/go/go1.17.8/src/testing/testing.go:1259 + buf.WriteString(fmt.Sprintf("> %s\t%s:%d\n", frame.Func.Name(), frame.File, frame.Line)) + if !more { + break + } + } + return buf.String() +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go b/vendor/github.com/efficientgo/core/logerrcapture/do.go similarity index 94% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go rename to vendor/github.com/efficientgo/core/logerrcapture/do.go index 41662b90138..6bad9fa6eba 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/do.go @@ -11,10 +11,9 @@ package logerrcapture import ( "fmt" "io" - "io/ioutil" "os" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" ) // Logger interface compatible with go-kit/logger. 
@@ -42,7 +41,7 @@ func Do(logger Logger, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with a log message on error but exhausts the reader before. func ExhaustClose(logger Logger, r io.ReadCloser, format string, a ...interface{}) { - _, err := io.Copy(ioutil.Discard, r) + _, err := io.Copy(io.Discard, r) if err != nil { _ = logger.Log("msg", "failed to exhaust reader, performance may be impeded", "err", err) } diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go b/vendor/github.com/efficientgo/core/logerrcapture/doc.go similarity index 66% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go rename to vendor/github.com/efficientgo/core/logerrcapture/doc.go index 414efc99582..90738562ceb 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/doc.go @@ -1,30 +1,44 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package logerrcapture - -// Close a `io.Closer` interface or execute any function that returns error safely while logging error. +// Package logerrcapture implements robust error handling in defer statements using provided logger. +// +// The Close a `io.Closer` interface or execute any function that returns error safely while logging error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `logerrcapture` provides utility functions to capture error and log it via provided // logger, while still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer logerrcapture.Do(logger, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer logerrcapture.Do(logger, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `logerrcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File, logger logerrcapture.Logger) error { +// defer logerrcapture.Do(logger, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The logerrcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Recommended: Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/errcapture if you want to return error instead of just logging (causing // hard error). +package logerrcapture diff --git a/vendor/github.com/efficientgo/core/merrors/doc.go b/vendor/github.com/efficientgo/core/merrors/doc.go new file mode 100644 index 00000000000..a670493f46a --- /dev/null +++ b/vendor/github.com/efficientgo/core/merrors/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package merrors implements multi error implementation that chains errors on the same level. 
+// Supports errors.As and errors.Is functions. +// +// Example 1: +// +// return merrors.New(err1, err2).Err() +// +// Example 2: +// +// merr := merrors.New(err1) +// merr.Add(err2, errOrNil3) +// for _, err := range errs { +// merr.Add(err) +// } +// return merr.Err() +// +// Example 3: +// +// func CloseAll(closers []io.Closer) error { +// errs := merrors.New() +// for _ , c := range closers { +// errs.Add(c.Close()) +// } +// return errs.Err() +// } +package merrors diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go b/vendor/github.com/efficientgo/core/merrors/merrors.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go rename to vendor/github.com/efficientgo/core/merrors/merrors.go diff --git a/vendor/github.com/efficientgo/core/testutil/doc.go b/vendor/github.com/efficientgo/core/testutil/doc.go new file mode 100644 index 00000000000..ac4ca1df551 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package testutil is a minimal testing utility with only few functions like `Assert`, `Ok`, `NotOk` for errors and `Equals`. +// It also provides TB ("TestOrBench") utility for union of testing and benchmarks. +package testutil diff --git a/vendor/github.com/efficientgo/core/testutil/internal/difflib.go b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go new file mode 100644 index 00000000000..aea79d21543 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go @@ -0,0 +1,645 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. 
This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go b/vendor/github.com/efficientgo/core/testutil/testorbench.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go rename to vendor/github.com/efficientgo/core/testutil/testorbench.go diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go b/vendor/github.com/efficientgo/core/testutil/testutil.go similarity index 68% rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go rename to vendor/github.com/efficientgo/core/testutil/testutil.go index 8a46d2e68f5..c74f368507b 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go +++ b/vendor/github.com/efficientgo/core/testutil/testutil.go @@ -12,9 +12,8 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" - "github.com/pmezard/go-difflib/difflib" - "go.uber.org/goleak" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/testutil/internal" ) // Assert fails the test if the condition is false. 
@@ -77,6 +76,68 @@ func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) } +// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. +func FaultOrPanicToErr(f func()) (err error) { + // Set this go routine to panic on segfault to allow asserting on those. + debug.SetPanicOnFault(true) + defer func() { + if r := recover(); r != nil { + err = errors.Newf("invoked function panicked or caused segmentation fault: %v", r) + } + debug.SetPanicOnFault(false) + }() + + f() + + return err +} + +// ContainsStringSlice fails the test if needle is not contained within haystack, if haystack or needle is +// an empty slice, or if needle is longer than haystack. +func ContainsStringSlice(tb testing.TB, haystack, needle []string) { + _, file, line, _ := runtime.Caller(1) + + if !contains(haystack, needle) { + tb.Fatalf(sprintfWithLimit("\033[31m%s:%d: %#v does not contain %#v\033[39m\n\n", filepath.Base(file), line, haystack, needle)) + } +} + +func contains(haystack, needle []string) bool { + if len(haystack) == 0 || len(needle) == 0 { + return false + } + + if len(haystack) < len(needle) { + return false + } + + for i := 0; i < len(haystack); i++ { + outer := i + + for j := 0; j < len(needle); j++ { + // End of the haystack but not the end of the needle, end + if outer == len(haystack) { + return false + } + + // No match, try the next index of the haystack + if haystack[outer] != needle[j] { + break + } + + // End of the needle and it still matches, end + if j == len(needle)-1 { + return true + } + + // This element matches between the two slices, try the next one + outer++ + } + } + + return false +} + func sprintfWithLimit(act string, v ...interface{}) string { s := fmt.Sprintf(act, v...) if len(s) > 10000 { @@ -128,9 +189,9 @@ func diff(expected interface{}, actual interface{}) string { a = reflect.ValueOf(actual).String() } - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), + diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ + A: internal.SplitLines(e), + B: internal.SplitLines(a), FromFile: "Expected", FromDate: "", ToFile: "Actual", @@ -139,43 +200,3 @@ func diff(expected interface{}, actual interface{}) string { }) return "\n\nDiff:\n" + diff } - -// TolerantVerifyLeakMain verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeakMain(m *testing.M) { - goleak.VerifyTestMain(m, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// TolerantVerifyLeak verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. 
-func TolerantVerifyLeak(t *testing.T) { - goleak.VerifyNone(t, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. -func FaultOrPanicToErr(f func()) (err error) { - // Set this go routine to panic on segfault to allow asserting on those. - debug.SetPanicOnFault(true) - defer func() { - if r := recover(); r != nil { - err = errors.Errorf("invoked function panicked or caused segmentation fault: %v", r) - } - debug.SetPanicOnFault(false) - }() - - f() - - return err -} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go deleted file mode 100644 index d1eaf8333cb..00000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package merrors - -// Safe multi error implementation that chains errors on the same level. Supports errors.As and errors.Is functions. -// -// Example 1: -// -// return merrors.New(err1, err2).Err() -// -// Example 2: -// -// merr := merrors.New(err1) -// merr.Add(err2, errOrNil3) -// for _, err := range errs { -// merr.Add(err) -// } -// return merr.Err() -// diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go deleted file mode 100644 index dc9f63f1b0a..00000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -// Simplistic assertion helpers for testing code. TestOrBench utils for union of testing and benchmarks. 
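For reviewers following the `efficientgo/tools/core` → `efficientgo/core` migration above: a minimal sketch of how the relocated packages are consumed under their new import paths. This snippet is illustrative and not part of this PR; the file paths and helper names are hypothetical, while the calls (`errcapture.Do`, `errors.Wrap`, `merrors.New().Add().Err()`) follow the usage documented in the vendored sources above.

```go
// Illustrative sketch (not part of this PR): consuming the relocated
// efficientgo packages under their new github.com/efficientgo/core/...
// import paths. File paths and helper names here are hypothetical.
package main

import (
	"fmt"
	"os"

	"github.com/efficientgo/core/errcapture"
	"github.com/efficientgo/core/errors"
	"github.com/efficientgo/core/merrors"
)

// readAndClose follows the errcapture pattern from the package docs:
// the deferred Do captures any Close error into the named return value.
func readAndClose(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return errors.Wrap(err, "open file")
	}
	defer errcapture.Do(&err, f.Close, "close file at the end")

	_, err = f.Read(make([]byte, 16))
	return err
}

func main() {
	// merrors chains errors on the same level (see merrors/doc.go above);
	// nil errors added to the multi-error are ignored.
	merr := merrors.New()
	merr.Add(readAndClose("/etc/hostname"), readAndClose("/does/not/exist"))
	if err := merr.Err(); err != nil {
		fmt.Println(err)
	}
}
```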
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore index 8081bd0ffbf..12ecd6ae9b7 100644 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -1,4 +1,5 @@ *~ *.test validator -golangci-lint \ No newline at end of file +golangci-lint +functional_tests \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index dfc0c2d5377..875b949c6dd 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -12,8 +12,7 @@ linters: - govet - ineffassign - gosimple - - deadcode - - structcheck + - unused - gocritic issues: @@ -25,3 +24,4 @@ issues: - "captLocal:" - "ifElseChain:" - "elseif:" + - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ac4a328f0e3..ace66670542 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,7 +9,7 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml @@ -27,7 +27,8 @@ examples: @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests clean: @echo "Cleaning up all the generated files" diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index 4e3abf71d2d..9b6bbbec342 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -2,7 +2,7 @@ The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). +This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). This document assumes that you have a working [Go development environment](https://golang.org/doc/install). @@ -126,53 +126,53 @@ mc ls play/mymusic/ ## API Reference The full API Reference is available here. 
-* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* 
[`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## Full Examples @@ -236,8 +236,8 @@ The full API Reference is available here. 
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ## Contribute [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md index 64e79341196..97087c61e0c 100644 --- a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md +++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md @@ -14,7 +14,7 @@ MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对 - Ceph Object Gateway - Riak CS -本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 +本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html)。 本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 @@ -140,52 +140,52 @@ mc ls play/mymusic/ ## API文档 完整的API文档在这里。 -* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整API文档](https://min.io/docs/minio/linux/developers/go/API.html) ### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展) +* 
[`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO 扩展) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO 扩展) ### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) ### API文档 : 操作对象 -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* 
[`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## 完整示例 @@ -253,8 +253,8 @@ mc ls play/mymusic/ * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## 了解更多 -* [完整文档](https://docs.min.io) -* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整文档](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API文档](https://min.io/docs/minio/linux/developers/go/API.html) ## 贡献 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index b59924a3d51..835c8bd8ad5 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -221,7 +221,7 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) } if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if !dstOpts.Internal.LegalholdTimestamp.IsZero() { headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 5a8d473c623..1f479387772 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -45,19 +45,20 @@ type StringMap map[string]string // on the first line is initialize it. func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` + type Item struct { + Key string + Value string } for { - var e xmlMapEntry + var e Item err := d.Decode(&e) if err == io.EOF { break - } else if err != nil { + } + if err != nil { return err } - (*m)[e.XMLName.Local] = e.Value + (*m)[e.Key] = e.Value } return nil } @@ -86,6 +87,8 @@ type UploadInfo struct { ExpirationRuleID string // Verified checksum values, if any. + // Values are base64 (standard) encoded. + // For multipart objects this is a checksum of the checksum of each part. ChecksumCRC32 string ChecksumCRC32C string ChecksumSHA1 string @@ -118,7 +121,7 @@ type ObjectInfo struct { Metadata http.Header `json:"metadata" xml:"-"` // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` + UserMetadata StringMap `json:"userMetadata,omitempty"` // x-amz-tagging values in their k/v values. UserTags map[string]string `json:"userTags"` @@ -146,7 +149,8 @@ type ObjectInfo struct { // - FAILED // - REPLICA (on the destination) ReplicationStatus string `xml:"ReplicationStatus"` - + // set to true if delete marker has backing object version on target, and eligible to replicate + ReplicationReady bool // Lifecycle expiry-date and ruleID associated with the expiry // not to be confused with `Expires` HTTP header. 
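A runnable restatement of the new StringMap decoder in the api-datatypes.go hunk above: the old code keyed entries off each child's element name, whereas the new format expects repeated elements carrying explicit Key/Value children. Type and element names here mirror the hunk for illustration, not a public API:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"log"
)

type StringMap map[string]string

// UnmarshalXML decodes each child element as an item with explicit
// <Key> and <Value> children, matching the rewritten vendored decoder.
func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	*m = StringMap{}
	type Item struct {
		Key   string
		Value string
	}
	for {
		var e Item
		err := d.Decode(&e)
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		(*m)[e.Key] = e.Value
	}
	return nil
}

func main() {
	var m StringMap
	doc := `<UserMetadata><Item><Key>color</Key><Value>blue</Value></Item></UserMetadata>`
	if err := xml.Unmarshal([]byte(doc), &m); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(m) // map[color:blue]
}
```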
Expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index dd781cae33b..33ca394588e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -67,14 +67,14 @@ type ErrorResponse struct { // // For example: // -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... func ToErrorResponse(err error) ErrorResponse { switch err := err.(type) { case ErrorResponse: diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 08ae95c4249..0082c1fa762 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -27,8 +27,9 @@ import ( // AdvancedGetOptions for internal use by MinIO server - not intended for client use. type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - ReplicationProxyRequest string + ReplicationDeleteMarker bool + IsReplicationReadyForDeleteMarker bool + ReplicationProxyRequest string } // GetObjectOptions are used to specify additional headers or options diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index b624b07786e..d216afb3972 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -32,11 +32,10 @@ import ( // This call requires explicit authentication, no anonymous requests are // allowed for listing buckets. // -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) @@ -71,21 +70,28 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List // Return object owner information by default fetchOwner := true + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. 
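The sendObjectInfo helper introduced in the listObjectsV2 hunk above is the standard remedy for a subtle goroutine leak: a bare send on objectStatCh blocks forever if the consumer has stopped ranging over the channel, pinning the producer goroutine for the life of the process. Selecting on ctx.Done() gives the producer an exit path. The pattern in isolation (a sketch, not the vendored code):

```go
package main

import (
	"context"
	"fmt"
)

// produce streams results but exits cleanly once the consumer cancels,
// instead of blocking forever on a channel nobody reads.
func produce(ctx context.Context, out chan<- int) {
	defer close(out)
	for i := 0; ; i++ {
		select {
		case out <- i:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	out := make(chan int)
	go produce(ctx, out)
	fmt.Println(<-out, <-out) // 0 1
	cancel()                  // producer exits even though we stop reading
}
```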
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -99,9 +105,9 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -138,6 +144,14 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List if !result.IsTruncated { return } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + sendObjectInfo(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), + }) + return + } } }(objectStatCh) return objectStatCh @@ -263,20 +277,28 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // If recursive we do not delimit. delimiter = "" } + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -289,9 +311,9 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // Get list of objects a maximum of 1000 per request. result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -344,21 +366,28 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts delimiter = "" } + sendObjectInfo := func(info ObjectInfo) { + select { + case resultCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } @@ -375,9 +404,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts // Get list of objects a maximum of 1000 per request. result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -659,11 +688,10 @@ func (o *ListObjectsOptions) Set(key, value string) { // ListObjects returns objects list after evaluating the passed options. // -// api := client.New(....) -// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// +// api := client.New(....) 
+// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { return c.listObjectVersions(ctx, bucketName, opts) @@ -694,12 +722,12 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } +// api := client.New(....) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) } @@ -916,7 +944,7 @@ func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName strin } // listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded +// - lists some or all (up to 1000) parts that have been uploaded // for a specific multipart upload // // You can use the request parameters as selection criteria to return diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index 149a536e42f..7fad7600c3d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -65,10 +65,9 @@ func isReadAt(reader io.Reader) (ok bool) { // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible // object storage it will have the following parameters as constants. // -// maxPartsCount - 10000 -// minPartSize - 16MiB -// maxMultipartPutObjectSize - 5TiB -// +// maxPartsCount - 10000 +// minPartSize - 16MiB +// maxMultipartPutObjectSize - 5TiB func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { // object size is '-1' set it to 5TiB. var unknownSize bool diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 5fec17a1c45..3c9a13ff20b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -159,8 +159,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj crcBytes = append(crcBytes, cSum...) } + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. 
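Tying the api-list.go hunks together: callers still range over the returned channel and must check ObjectInfo.Err, which now also carries the new incompatible-server error when a truncated listing arrives without a continuation token. A consumption sketch (endpoint, keys, bucket, and prefix are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // with the ctx-aware sends above, cancel also unblocks the lister

	for obj := range client.ListObjects(ctx, "mybucket", minio.ListObjectsOptions{
		Prefix:    "logs/",
		Recursive: true,
	}) {
		if obj.Err != nil {
			log.Fatalln(obj.Err) // includes the new truncated-without-token error
		}
		fmt.Println(obj.Key)
	}
}
```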
- objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -269,57 +270,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object return initiateMultipartUploadResult, nil } +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + // uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName string, uploadID string, reader io.Reader, partNumber int, md5Base64 string, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, customHeader http.Header) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } - if partNumber <= 0 { + if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } - if uploadID == "" { + if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. - urlValues.Set("uploadId", uploadID) + urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. - if customHeader == nil { - customHeader = make(http.Header) + if p.customHeader == nil { + p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. 
- if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, + bucketName: p.bucketName, + objectName: p.objectName, queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: streamSha256, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, } // Execute PUT on each part. @@ -330,7 +347,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. @@ -341,8 +358,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } - objPart.Size = size - objPart.PartNumber = partNumber + objPart.Size = p.size + objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil @@ -431,5 +448,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, + + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 464bde7f305..e3a14c59d8e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -107,11 +107,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN return UploadInfo{}, err } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -177,14 +185,33 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // As a special case if partNumber is lastPartNumber, we // calculate the offset based on the last part size. 
if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) + readOffset = size - lastPartSize partSize = lastPartSize } sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + var trailer = make(http.Header, 1) + if withChecksum { + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + } // Proceed to upload the part. - objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, nil) + p := uploadPartParams{bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } + objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ Error: err, @@ -221,8 +248,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, }) } } @@ -235,6 +266,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + if withChecksum { + // Add hash of hashes. + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for _, part := range complMultipartUpload.Parts { + cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + if err == nil { + crc.Write(cs) + } + } + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err @@ -339,8 +382,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Update progress reader appropriately to the latest offset // as we read from the source. 
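The streaming hunks above ship a per-part CRC32C digest in the x-amz-checksum-crc32c trailing header, base64-encoded: a placeholder value is written first and the real digest is filled in by the hash-reader wrapper once the part body has been read. The encoding itself, in isolation:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("example part payload")
	// Castagnoli is the CRC32C polynomial used by the S3 checksum headers.
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	crc.Write(data)
	// Matches the x-amz-checksum-crc32c trailer value set in the hunk above.
	fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}
```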
hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, hooked, partNumber, md5Base64, "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -419,6 +462,7 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + var readSeeker io.Seeker if size > 0 { if isReadAt(reader) && !isObject(reader) { seeker, ok := reader.(io.Seeker) @@ -428,35 +472,49 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument(err.Error()) } reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) } } } var md5Base64 string if opts.SendContentMd5 { - // Create a buffer. - buf := make([]byte, size) + // Calculate md5sum. + hash := c.md5Hasher() - length, rErr := readFull(reader, buf) - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } + + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) } - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - reader = bytes.NewReader(buf[:length]) hash.Close() } // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, opts.Progress) + progressReader := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) } // putObjectDo - executes the put object http operation. 
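Once all parts complete, the object-level checksum is not a digest of the object bytes but a CRC32C computed over the decoded per-part checksums, matching the "checksum of the checksum of each part" comment added to UploadInfo earlier in this diff. A sketch of that aggregation (stricter about decode errors than the vendored hunk, which silently skips undecodable values):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
	"log"
)

// checksumOfChecksums computes the multipart object checksum: a CRC32C
// over the concatenated, decoded per-part CRC32C digests.
func checksumOfChecksums(partChecksums []string) (string, error) {
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	for _, b64 := range partChecksums {
		cs, err := base64.StdEncoding.DecodeString(b64)
		if err != nil {
			return "", err
		}
		crc.Write(cs)
	}
	return base64.StdEncoding.EncodeToString(crc.Sum(nil)), nil
}

func main() {
	// Sample inputs only; any valid base64 CRC32C values work.
	sum, err := checksumOfChecksums([]string{"yXTVFQ==", "zXqj7Q=="})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(sum)
}
```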
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 321ad00aae4..5735bee5eb2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -159,7 +159,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) } if opts.Internal.ReplicationRequest { - header.Set(minIOBucketReplicationRequest, "") + header.Set(minIOBucketReplicationRequest, "true") } if !opts.Internal.LegalholdTimestamp.IsZero() { header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) @@ -269,6 +269,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str } if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } @@ -366,7 +369,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, "", int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 0fee90226b6..c0ff31a5bd2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -82,8 +82,8 @@ func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, // RemoveBucket deletes the bucket name. // -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -166,7 +166,7 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) } if opts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if opts.ForceDelete { headers.Set(minIOForceDelete, "true") diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index b1b848f4a65..827395ee125 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -316,17 +316,15 @@ type completeMultipartUploadResult struct { // CompletePart sub container lists individual part numbers and their // md5sum, part of completeMultipartUpload. 
type CompletePart struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` - // Part number identifies the part. PartNumber int ETag string // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` } // completeMultipartUpload container for completing multipart upload. diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index 6deb5f5dc94..418d6cb2547 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -70,6 +70,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if opts.Internal.ReplicationDeleteMarker { headers.Set(minIOBucketReplicationDeleteMarker, "true") } + if opts.Internal.IsReplicationReadyForDeleteMarker { + headers.Set(isMinioTgtReplicationReady, "true") + } urlValues := make(url.Values) if opts.VersionID != "" { @@ -90,6 +93,7 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if resp != nil { deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ @@ -105,8 +109,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, }, errResp } return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationReady: replicationReady, // whether delete marker can be replicated }, httpRespToErrorResponse(resp, bucketName, objectName) } } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 6598e9d92eb..a93a6c6207a 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -20,8 +20,10 @@ package minio import ( "bytes" "context" + "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "math/rand" @@ -93,6 +95,8 @@ type Client struct { sha256Hasher func() md5simd.Hasher healthStatus int32 + + trailingHeaderSupport bool } // Options for New method @@ -103,6 +107,10 @@ type Options struct { Region string BucketLookup BucketLookupType + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool + // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher @@ -111,13 +119,13 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.37" + libraryVersion = "v7.0.45" ) // User Agent should always following the below style. // Please open an issue to discuss any new changes here. 
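The new Options.TrailingHeaders flag in the api.go hunk above is what gates the trailer-based checksum machinery, and per privateNew's trailingHeaderSupport assignment it only takes effect when the resolved signature type is v4. Opting in from client code (endpoint and keys are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("minio.example.com", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:          true,
		TrailingHeaders: true, // no effect unless v4 signatures are in use
	})
	if err != nil {
		log.Fatalln(err)
	}
	_ = client // uploads now send CRC32C as a trailing header where supported
}
```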
// -// MinIO (OS; ARCH) LIB/VER APP/VER +// MinIO (OS; ARCH) LIB/VER APP/VER const ( libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion @@ -246,6 +254,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { if clnt.sha256Hasher == nil { clnt.sha256Hasher = newSHA256Hasher } + + clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. clnt.lookup = opts.BucketLookup @@ -312,9 +323,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) hashAlgos = make(map[string]md5simd.Hasher) @@ -419,6 +430,8 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool + addCrc bool + trailer http.Header // (http.Request).Trailer. Requires v4 signature. } // dumpHTTP - dump HTTP request and response. @@ -581,6 +594,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } + if metadata.addCrc { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } // Instantiate a new request. var req *http.Request req, err = c.newRequest(ctx, method, metadata) @@ -592,6 +616,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } + // Initiate the request. res, err = c.do(req) if err != nil { @@ -632,7 +657,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // code dictates invalid region, we can retry the request // with the new region. // - // Additionally we should only retry if bucketLocation and custom + // Additionally, we should only retry if bucketLocation and custom // region is empty. if c.region == "" { switch errResponse.Code { @@ -814,9 +839,12 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Add signature version '2' authorization header. req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.streamSha256 && !c.secure: - // Streaming signature is used by default for a PUT object request. 
Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: @@ -824,11 +852,17 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request shaHeader := unsignedPayload if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer } req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) } // Return request. diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index dee83b870cc..1c3e8e6f646 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -46,6 +46,10 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + // Total number of parallel workers used for multipart operation. const totalWorkers = 4 @@ -96,6 +100,9 @@ const ( minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" // Header indicates last legalhold update time on source minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" - - minIOForceDelete = "x-minio-force-delete" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index f6132e79a03..207a3870207 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke // PutObjectPart - Upload an object part. 
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - streamSha256 := true - return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256, nil) + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: md5Base64, + sha256Hex: sha256Hex, + size: size, + sse: sse, + streamSha256: true, + } + return c.uploadPart(ctx, p) } // ListObjectParts - List uploaded parts of an incomplete upload.x diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 483b5cb115e..e86e142e552 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -2054,10 +2054,10 @@ func testPutObjectWithChecksums() { ChecksumSHA1 string ChecksumSHA256 string }{ - {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE(), ChecksumCRC32: "yXTVFQ=="}, - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "zXqj7Q=="}, - {header: "x-amz-checksum-sha1", hasher: sha1.New(), ChecksumSHA1: "SwmAs3F75Sw/sE4dHehkvYtn9UE="}, - {header: "x-amz-checksum-sha256", hasher: sha256.New(), ChecksumSHA256: "8Tlu9msuw/cpmWNEnQx97axliBjiE6gK1doiY0N9WuA="}, + {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + {header: "x-amz-checksum-sha1", hasher: sha1.New()}, + {header: "x-amz-checksum-sha256", hasher: sha256.New()}, } for i, test := range tests { @@ -2113,10 +2113,10 @@ func testPutObjectWithChecksums() { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2132,10 +2132,10 @@ func testPutObjectWithChecksums() { return } - cmpChecksum(st.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(st.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(st.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(st.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -4204,10 +4204,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) return } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - 
} if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) return @@ -4216,10 +4212,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) return } - if err := policy.SetContentTypeStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) return diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index 12ed0842770..e964b521737 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -19,6 +19,7 @@ package credentials import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/xml" "errors" @@ -31,7 +32,6 @@ import ( "time" "github.com/minio/minio-go/v7/pkg/signer" - sha256 "github.com/minio/sha256-simd" ) // AssumeRoleResponse contains the result of successful AssumeRole request. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go index 6dc8e9d0525..ddccfb173fe 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -31,18 +31,17 @@ package credentials // will cache that Provider for all calls to IsExpired(), until Retrieve is // called again after IsExpired() is true. // -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) // +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } type Chain struct { Providers []Provider curr Provider diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 6b93a27fbd4..af6104967c7 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -65,10 +65,11 @@ type Provider interface { // provider's struct. // // Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } +// +// type IAMCredentialProvider struct { +// Expiry +// ... 
+// } type Expiry struct { // The date/time when to expire on expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 00000000000..afbfad559ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample index 7fc91d9d204..e2dc1bfecb1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -10,3 +10,6 @@ aws_secret_access_key = secret [with_colon] aws_access_key_id: accessKey aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go index 0c94477b752..fbfb105491d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -28,35 +28,33 @@ // // Example of using the environment variable credentials. // -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. // -// -// Custom Provider +// # Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. 
// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} // +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() package credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index cbdcfe25653..da09707e339 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -18,17 +18,33 @@ package credentials import ( + "encoding/json" + "errors" "os" + "os/exec" "path/filepath" + "strings" + "time" ini "gopkg.in/ini.v1" ) +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + // A FileAWSCredentials retrieves credentials from the current user's home // directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type FileAWSCredentials struct { + Expiry + // Path to the shared credentials file. // // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the @@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { // Default to empty string if not found. token := iniProfile.Key("aws_session_token") + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + SignerType: SignatureV4, + }, nil + } p.retrieved = true return Value{ AccessKeyID: id.String(), @@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { }, nil } -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
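As a usage note on the credential_process support added above, here is a minimal sketch of driving it from client code; the profile name mirrors the [with_process] section added to credentials.sample, the file paths are illustrative, and NewFileAWSCredentials is this package's existing constructor for the provider:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// An empty filename falls back to AWS_SHARED_CREDENTIALS_FILE or the
	// default shared-credentials location; "with_process" names a profile
	// whose credential_process command prints a JSON document shaped like
	// the credentials.json fixture vendored above.
	creds := credentials.NewFileAWSCredentials("", "with_process")

	// Get runs the external command, decodes its JSON output, and records
	// the reported Expiration (via SetExpiration with DefaultExpiryWindow).
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("access key:", v.AccessKeyID)
}
```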
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 00000000000..6db26c036f5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 00000000000..640258242f9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go index 06e68e7367e..163fa62b427 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -67,9 +67,9 @@ var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { // Type is the server-side-encryption method. It represents one of // the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption type Type string const ( diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go index d0a47163838..126661a9e68 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -22,14 +22,14 @@ type identity struct { PrincipalID string `json:"principalId"` } -// event bucket metadata. +// event bucket metadata. 
type bucketMeta struct { Name string `json:"name"` OwnerIdentity identity `json:"ownerIdentity"` ARN string `json:"arn"` } -// event object metadata. +// event object metadata. type objectMeta struct { Key string `json:"key"` Size int64 `json:"size,omitempty"` @@ -40,7 +40,7 @@ type objectMeta struct { Sequencer string `json:"sequencer"` } -// event server specific metadata. +// event server specific metadata. type eventMeta struct { SchemaVersion string `json:"s3SchemaVersion"` ConfigurationID string `json:"configurationId"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 75a1f609621..931ca5bc284 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -21,6 +21,7 @@ import ( "encoding/xml" "errors" "fmt" + "strings" "github.com/minio/minio-go/v7/pkg/set" ) @@ -29,7 +30,8 @@ import ( type EventType string // The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( ObjectCreatedAll EventType = "s3:ObjectCreated:*" ObjectCreatedPut = "s3:ObjectCreated:Put" @@ -87,6 +89,27 @@ func NewArn(partition, service, region, accountID, resource string) Arn { } } +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") +) + +// NewArnFromString parses string representation of ARN into Arn object. +// Returns an error if the string format is incorrect. +func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + // String returns the string format of the ARN func (arn Arn) String() string { return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 00000000000..5838b9de995 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,225 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. 
+ prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index b1296d2b176..49e999b01a3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -32,13 +32,17 @@ import ( // Reference for constants used below - // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. 
"f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" ) // Request headers to be ignored while calculating seed signature for @@ -60,7 +64,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 { } // getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { if dataLen <= 0 { return 0 } @@ -73,6 +77,15 @@ func getStreamLength(dataLen, chunkSize int64) int64 { streamLen += getSignedChunkLength(remainingBytes) } streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + return streamLen } @@ -91,18 +104,41 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ return strings.Join(stringToSignParts, "\n") } +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) } @@ -122,6 +158,16 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region, return getSignature(signingKey, chunkStringToSign) } +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
+func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + // getSeedSignature - returns the seed signature for a given request. func (s *StreamingReader) setSeedSignature(req *http.Request) { // Get canonical request @@ -156,10 +202,11 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int + trailer http.Header } // signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, s.region, s.prevSignature, s.secretAccessKey) @@ -175,13 +222,40 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.buf.Write(s.chunkBuf[:chunkLen]) // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) + if addCrLf { + s.buf.Write([]byte("\r\n")) + } // Reset chunkBufLen for next chunk read. s.chunkBufLen = 0 s.chunkNum++ } +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + // Compute chunk signature + signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { @@ -222,6 +296,11 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } // Add the request headers required for chunk upload signing. @@ -272,7 +351,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { (s.chunkNum == s.totalChunks-1 && s.chunkBufLen == s.lastChunkSize) { // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) + s.signChunk(s.chunkBufLen, true) break } } @@ -289,7 +368,11 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } // Sign the chunk and write it to s.buf. - s.signChunk(0) + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. 
+ s.addSignedTrailer(s.trailer) + } break } return 0, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index cf7921d1f64..fa4f8c91e6c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -162,11 +162,12 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -189,11 +190,12 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -281,8 +283,9 @@ var resourceList = []string{ // From the Amazon docs: // // CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. requestURL := req.URL diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index a12608ebb0b..34914490c0e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,7 +42,6 @@ const ( ServiceTypeSTS = "sts" ) -// // Excerpts from @lsegal - // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. // @@ -57,7 +56,6 @@ const ( // * Accept-Encoding // Some S3 servers like Hitachi Content Platform do not honor this header for signature // calculation. -// var v4IgnoredHeaders = map[string]bool{ "Accept-Encoding": true, "Authorization": true, @@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // getCanonicalRequest generate a canonical request of style. // // canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// +// +// \n +// \n +// \n +// \n +// \n +// func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ @@ -264,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4STS - signature v4 for STS request. 
func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) } // Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -285,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.TransferEncoding = []string{"aws-chunked"} + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + hashedPayload := getHashedPayload(req) if serviceType == ServiceTypeSTS { // Content sha256 header is not sent with the request @@ -322,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati auth := strings.Join(parts, ", ") req.Header.Set("Authorization", auth) + if len(trailer) > 0 { + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) + } return &req } // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index b54fa4c77cf..333f1aa250f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -19,10 +19,9 @@ package signer import ( "crypto/hmac" + "crypto/sha256" "net/http" "strings" - - "github.com/minio/sha256-simd" ) // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go index d7c65af5a0f..98ae17efad3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -1,5 +1,6 @@ /* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,6 +21,8 @@ import ( "encoding/xml" "io" "net/url" + "regexp" + "sort" "strings" "unicode/utf8" ) @@ -63,8 +66,17 @@ const ( maxTagCount = 50 ) +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +// borrowed from this article and also testing various ASCII characters following regex +// is supported by AWS S3 for both tags and values. +var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`) + func checkKey(key string) error { - if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + if len(key) == 0 { + return errInvalidTagKey + } + + if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { return errInvalidTagKey } @@ -72,8 +84,10 @@ func checkKey(key string) error { } func checkValue(value string) error { - if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { - return errInvalidTagValue + if value != "" { + if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { + return errInvalidTagValue + } } return nil @@ -136,11 +150,26 @@ type tagSet struct { } func (tags tagSet) String() string { - vals := make(url.Values) - for key, value := range tags.tagMap { - vals.Set(key, value) + if len(tags.tagMap) == 0 { + return "" } - return vals.Encode() + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() } func (tags *tagSet) remove(key string) { @@ -175,7 +204,7 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error { } func (tags tagSet) toMap() map[string]string { - m := make(map[string]string) + m := make(map[string]string, len(tags.tagMap)) for key, value := range tags.tagMap { m[key] = value } @@ -188,6 +217,7 @@ func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { Tags []Tag `xml:"Tag"` }{} + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) for key, value := range tags.tagMap { tagList.Tags = append(tagList.Tags, Tag{key, value}) } @@ -213,7 +243,7 @@ func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return errTooManyTags } - m := map[string]string{} + m := make(map[string]string, len(tagList.Tags)) for _, tag := range tagList.Tags { if _, found := m[tag.Key]; found { return errDuplicateTagKey @@ -311,14 +341,49 @@ func ParseObjectXML(reader io.Reader) (*Tags, error) { return unmarshalXML(reader, true) } +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. 
+func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (tags *tagSet) parseTags(tgs string) (err error) { + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if err = tags.set(key, value, true); err != nil { + return err + } + } + return err +} + // Parse decodes HTTP query formatted string into tags which is limited by isObject. // A query formatted string is like "key1=value1&key2=value2". func Parse(s string, isObject bool) (*Tags, error) { - values, err := url.ParseQuery(s) - if err != nil { - return nil, err - } - tagging := &Tags{ TagSet: &tagSet{ tagMap: make(map[string]string), @@ -326,10 +391,8 @@ func Parse(s string, isObject bool) (*Tags, error) { }, } - for key := range values { - if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { - return nil, err - } + if err := tagging.TagSet.parseTags(s); err != nil { + return nil, err } return tagging, nil diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 7bf560304a2..4b3df19127f 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -32,12 +32,11 @@ const expirationDateFormat = "2006-01-02T15:04:05.999Z" // // Example: // -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } type policyCondition struct { matchType string condition string @@ -98,10 +97,8 @@ func (p *PostPolicy) SetKey(key string) error { // SetKeyStartsWith - Sets an object name that an policy based upload // can start with. +// Can use an empty value ("") to allow any key. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errInvalidArgument("Object prefix is empty.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$key", @@ -172,10 +169,8 @@ func (p *PostPolicy) SetContentType(contentType string) error { // SetContentTypeStartsWith - Sets what content-type of the object for this policy // based upload can start with. +// Can use an empty value ("") to allow any content-type. func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { - return errInvalidArgument("No content type specified.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$Content-Type", @@ -286,10 +281,14 @@ func (p *PostPolicy) SetUserData(key string, value string) error { } // addNewPolicy - internal helper to validate adding new policies. +// Can use starts-with with an empty value ("") to allow any content within a form field. 
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + if policyCond.matchType == "" || policyCond.condition == "" { return errInvalidArgument("Policy fields are empty.") } + if policyCond.matchType != "starts-with" && policyCond.value == "" { + return errInvalidArgument("Policy value is empty.") + } p.conditions = append(p.conditions, policyCond) return nil } diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 3a4cacfe811..589c0e5498f 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -28,8 +28,10 @@ var awsS3EndpointMap = map[string]string{ "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", @@ -38,6 +40,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index f32f84ab0ab..a8a45b1a8ce 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -20,6 +20,7 @@ package minio import ( "context" "crypto/md5" + fipssha256 "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/xml" @@ -39,6 +40,7 @@ import ( "time" md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/sha256-simd" ) @@ -520,6 +522,9 @@ func newMd5Hasher() md5simd.Hasher { } func newSHA256Hasher() md5simd.Hasher { + if encrypt.FIPS { + return &hashWrapper{Hash: fipssha256.New(), isSHA256: true} + } return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} } @@ -627,3 +632,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { } return false } + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. 
+func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go index ae3ebc03b97..c607a163a32 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go @@ -19,6 +19,7 @@ import ( "encoding/hex" "fmt" "net/http" + "strings" "sync" "github.com/go-kit/log" @@ -113,7 +114,12 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { hashedPassword = "$2y$10$QOauhQNbBCuQDKes6eFzPeMqBSjb7Mr5DUmpZ/VcEd00UAV/LDeSi" } - cacheKey := hex.EncodeToString(append(append([]byte(user), []byte(hashedPassword)...), []byte(pass)...)) + cacheKey := strings.Join( + []string{ + hex.EncodeToString([]byte(user)), + hex.EncodeToString([]byte(hashedPassword)), + hex.EncodeToString([]byte(pass)), + }, ":") authOk, ok := u.cache.get(cacheKey) if !ok { @@ -122,7 +128,7 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(pass)) u.bcryptMtx.Unlock() - authOk = err == nil + authOk = validUser && err == nil u.cache.set(cacheKey, authOk) } diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index d44ef3382c7..c415422357a 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -11,11 +11,15 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased ### Fixed +- [#33](https://github.com/thanos-io/objstore/pull/33) Tracing: Add `ContextWithTracer()` to inject the tracer into the context. +- [#34](https://github.com/thanos-io/objstore/pull/34) Fix ignored options when creating shared credential Azure client. ### Added - [#15](https://github.com/thanos-io/objstore/pull/15) Add Oracle Cloud Infrastructure Object Storage Bucket support. - [#25](https://github.com/thanos-io/objstore/pull/25) S3: Support specifying S3 storage class. +- [#32](https://github.com/thanos-io/objstore/pull/32) Swift: Support authentication using application credentials. ### Changed +- [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`. ### Removed diff --git a/vendor/github.com/thanos-io/objstore/README.md b/vendor/github.com/thanos-io/objstore/README.md index 642dd18df77..e914cf0678b 100644 --- a/vendor/github.com/thanos-io/objstore/README.md +++ b/vendor/github.com/thanos-io/objstore/README.md @@ -125,7 +125,7 @@ Current object storage client implementations: | [OpenStack Swift](#openstack-swift) | Beta (working PoC) | Production Usage | yes | @FUSAKLA | | [Tencent COS](#tencent-cos) | Beta | Production Usage | no | @jojohappy,@hanjm | | [AliYun OSS](#aliyun-oss) | Beta | Production Usage | no | @shaulboozhiao,@wujinhu | -| [Baidu BOS](#baidu-bos) | Beta | Production Usage | no | ?? 
| +| [Baidu BOS](#baidu-bos) | Beta | Production Usage | no | @yahaa | | [Local Filesystem](#filesystem) | Stable | Testing and Demo only | yes | @bwplotka | | [Oracle Cloud Infrastructure Object Storage](#oracle-cloud-infrastructure-object-storage) | Beta | Production Usage | yes | @aarontams,@gaurav-05,@ericrrath | @@ -473,6 +473,9 @@ config: password: "" domain_id: "" domain_name: "" + application_credential_id: "" + application_credential_name: "" + application_credential_secret: "" project_id: "" project_name: "" project_domain_id: "" diff --git a/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go b/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go index d1315ad2859..26b58d24f78 100644 --- a/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go +++ b/vendor/github.com/thanos-io/objstore/exthttp/tlsconfig.go @@ -7,7 +7,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" ) // TLSConfig configures the options for TLS connections. @@ -60,7 +60,7 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go index f90ed6d90c6..1b0a6439869 100644 --- a/vendor/github.com/thanos-io/objstore/inmem.go +++ b/vendor/github.com/thanos-io/objstore/inmem.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "sort" "strings" "sync" @@ -112,7 +111,7 @@ func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error) return nil, errNotFound } - return ioutil.NopCloser(bytes.NewReader(file)), nil + return io.NopCloser(bytes.NewReader(file)), nil } // GetRange returns a new range reader for the given object name and range. @@ -129,15 +128,15 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64 } if int64(len(file)) < off { - return ioutil.NopCloser(bytes.NewReader(nil)), nil + return io.NopCloser(bytes.NewReader(nil)), nil } if length == -1 { - return ioutil.NopCloser(bytes.NewReader(file[off:])), nil + return io.NopCloser(bytes.NewReader(file[off:])), nil } if length <= 0 { - return ioutil.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") + return io.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") } if int64(len(file)) <= off+length { @@ -145,7 +144,7 @@ func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64 length = int64(len(file)) - off } - return ioutil.NopCloser(bytes.NewReader(file[off : off+length])), nil + return io.NopCloser(bytes.NewReader(file[off : off+length])), nil } // Exists checks if the given directory exists in memory. 
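To illustrate the new Swift options shown in the README hunk above, an objstore bucket configuration can now authenticate via Keystone application credentials instead of a username and password; every value below is a placeholder, and typically either the credential ID or its name (plus the secret) is supplied:

```yaml
type: SWIFT
config:
  auth_url: "https://keystone.example.org/v3"
  application_credential_id: "app-cred-id"
  application_credential_secret: "app-cred-secret"
  container_name: "thanos"
```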
@@ -171,7 +170,7 @@ func (b *InMemBucket) Attributes(_ context.Context, name string) (ObjectAttribut func (b *InMemBucket) Upload(_ context.Context, name string, r io.Reader) error { b.mtx.Lock() defer b.mtx.Unlock() - body, err := ioutil.ReadAll(r) + body, err := io.ReadAll(r) if err != nil { return err } diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index 9ba2ea2d6ae..9a07f2fdf06 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/efficientgo/tools/core/pkg/logerrcapture" + "github.com/efficientgo/core/logerrcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go index c2ac65864b5..958068f7ca3 100644 --- a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go +++ b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go @@ -43,7 +43,7 @@ func getContainerClient(conf Config) (*azblob.ContainerClient, error) { if err != nil { return nil, err } - containerClient, err := azblob.NewContainerClientWithSharedKey(containerURL, cred, nil) + containerClient, err := azblob.NewContainerClientWithSharedKey(containerURL, cred, opt) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go index 8189168e807..0c75745d0b3 100644 --- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go +++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go @@ -7,11 +7,10 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" - "github.com/efficientgo/tools/core/pkg/errcapture" + "github.com/efficientgo/core/errcapture" "github.com/pkg/errors" "gopkg.in/yaml.v2" @@ -67,7 +66,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt return nil } - files, err := ioutil.ReadDir(absDir) + files, err := os.ReadDir(absDir) if err != nil { return err } diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index 3ce8af950cc..1208239a45b 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "os" "runtime" @@ -17,7 +16,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/logerrcapture" + "github.com/efficientgo/core/logerrcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/minio/minio-go/v7" @@ -286,7 +285,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B } case SSEC: - key, err := ioutil.ReadFile(config.SSEConfig.EncryptionKey) + key, err := os.ReadFile(config.SSEConfig.EncryptionKey) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go index 03f75012b31..f30c655dc74 100644 --- a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go +++ b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/errcapture" + 
"github.com/efficientgo/core/errcapture" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/ncw/swift" @@ -40,27 +40,30 @@ var DefaultConfig = Config{ } type Config struct { - AuthVersion int `yaml:"auth_version"` - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` - ChunkSize int64 `yaml:"large_object_chunk_size"` - SegmentContainerName string `yaml:"large_object_segments_container_name"` - Retries int `yaml:"retries"` - ConnectTimeout model.Duration `yaml:"connect_timeout"` - Timeout model.Duration `yaml:"timeout"` - UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` + AuthVersion int `yaml:"auth_version"` + AuthUrl string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserId string `yaml:"user_id"` + Password string `yaml:"password"` + DomainId string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ApplicationCredentialID string `yaml:"application_credential_id"` + ApplicationCredentialName string `yaml:"application_credential_name"` + ApplicationCredentialSecret string `yaml:"application_credential_secret"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` + ChunkSize int64 `yaml:"large_object_chunk_size"` + SegmentContainerName string `yaml:"large_object_segments_container_name"` + Retries int `yaml:"retries"` + ConnectTimeout model.Duration `yaml:"connect_timeout"` + Timeout model.Duration `yaml:"timeout"` + UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` } func parseConfig(conf []byte) (*Config, error) { @@ -76,25 +79,28 @@ func configFromEnv() (*Config, error) { } config := Config{ - AuthVersion: c.AuthVersion, - AuthUrl: c.AuthUrl, - Password: c.ApiKey, - Username: c.UserName, - UserId: c.UserId, - DomainId: c.DomainId, - DomainName: c.Domain, - ProjectID: c.TenantId, - ProjectName: c.Tenant, - ProjectDomainID: c.TenantDomainId, - ProjectDomainName: c.TenantDomain, - RegionName: c.Region, - ContainerName: os.Getenv("OS_CONTAINER_NAME"), - ChunkSize: DefaultConfig.ChunkSize, - SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), - Retries: c.Retries, - ConnectTimeout: model.Duration(c.ConnectTimeout), - Timeout: model.Duration(c.Timeout), - UseDynamicLargeObjects: false, + AuthVersion: c.AuthVersion, + AuthUrl: c.AuthUrl, + Username: c.UserName, + UserId: c.UserId, + Password: c.ApiKey, + DomainId: c.DomainId, + DomainName: c.Domain, + ApplicationCredentialID: c.ApplicationCredentialId, + ApplicationCredentialName: c.ApplicationCredentialName, + ApplicationCredentialSecret: c.ApplicationCredentialSecret, + ProjectID: c.TenantId, + ProjectName: c.Tenant, + ProjectDomainID: c.TenantDomainId, + ProjectDomainName: c.TenantDomain, + RegionName: c.Region, + 
ContainerName: os.Getenv("OS_CONTAINER_NAME"), + ChunkSize: DefaultConfig.ChunkSize, + SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), + Retries: c.Retries, + ConnectTimeout: model.Duration(c.ConnectTimeout), + Timeout: model.Duration(c.Timeout), + UseDynamicLargeObjects: false, } if os.Getenv("SWIFT_CHUNK_SIZE") != "" { var err error @@ -111,21 +117,24 @@ func configFromEnv() (*Config, error) { func connectionFromConfig(sc *Config) *swift.Connection { connection := swift.Connection{ - Domain: sc.DomainName, - DomainId: sc.DomainId, - UserName: sc.Username, - UserId: sc.UserId, - ApiKey: sc.Password, - AuthUrl: sc.AuthUrl, - Retries: sc.Retries, - Region: sc.RegionName, - AuthVersion: sc.AuthVersion, - Tenant: sc.ProjectName, - TenantId: sc.ProjectID, - TenantDomain: sc.ProjectDomainName, - TenantDomainId: sc.ProjectDomainID, - ConnectTimeout: time.Duration(sc.ConnectTimeout), - Timeout: time.Duration(sc.Timeout), + AuthVersion: sc.AuthVersion, + AuthUrl: sc.AuthUrl, + UserName: sc.Username, + UserId: sc.UserId, + ApiKey: sc.Password, + DomainId: sc.DomainId, + Domain: sc.DomainName, + ApplicationCredentialId: sc.ApplicationCredentialID, + ApplicationCredentialName: sc.ApplicationCredentialName, + ApplicationCredentialSecret: sc.ApplicationCredentialSecret, + TenantId: sc.ProjectID, + Tenant: sc.ProjectName, + TenantDomain: sc.ProjectDomainName, + TenantDomainId: sc.ProjectDomainID, + Region: sc.RegionName, + Retries: sc.Retries, + ConnectTimeout: time.Duration(sc.ConnectTimeout), + Timeout: time.Duration(sc.Timeout), } return &connection } diff --git a/vendor/github.com/thanos-io/objstore/testing.go b/vendor/github.com/thanos-io/objstore/testing.go index 3a38bcef229..d750142af1d 100644 --- a/vendor/github.com/thanos-io/objstore/testing.go +++ b/vendor/github.com/thanos-io/objstore/testing.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math/rand" "sort" "strings" @@ -16,7 +15,7 @@ import ( "testing" "time" - "github.com/efficientgo/tools/core/pkg/testutil" + "github.com/efficientgo/core/testutil" ) func CreateTemporaryTestBucketName(t testing.TB) string { @@ -107,7 +106,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rc1, err := bkt.Get(ctx, "id1/obj_1.some") testutil.Ok(t, err) defer func() { testutil.Ok(t, rc1.Close()) }() - content, err := ioutil.ReadAll(rc1) + content, err := io.ReadAll(rc1) testutil.Ok(t, err) testutil.Equals(t, "@test-data@", string(content)) @@ -119,7 +118,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rc2, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, 3) testutil.Ok(t, err) defer func() { testutil.Ok(t, rc2.Close()) }() - content, err = ioutil.ReadAll(rc2) + content, err = io.ReadAll(rc2) testutil.Ok(t, err) testutil.Equals(t, "tes", string(content)) @@ -127,7 +126,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rcUnspecifiedLen, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, -1) testutil.Ok(t, err) defer func() { testutil.Ok(t, rcUnspecifiedLen.Close()) }() - content, err = ioutil.ReadAll(rcUnspecifiedLen) + content, err = io.ReadAll(rcUnspecifiedLen) testutil.Ok(t, err) testutil.Equals(t, "test-data@", string(content)) @@ -142,7 +141,7 @@ func AcceptanceTest(t *testing.T, bkt Bucket) { rcLength, err := bkt.GetRange(ctx, "id1/obj_1.some", 3, 9999) testutil.Ok(t, err) defer func() { testutil.Ok(t, rcLength.Close()) }() - content, err = ioutil.ReadAll(rcLength) + content, err = io.ReadAll(rcLength) testutil.Ok(t, err) testutil.Equals(t, "st-data@", string(content)) diff --git 
a/vendor/github.com/thanos-io/objstore/tlsconfig.go b/vendor/github.com/thanos-io/objstore/tlsconfig.go index f705fa31b6b..81c4f08736a 100644 --- a/vendor/github.com/thanos-io/objstore/tlsconfig.go +++ b/vendor/github.com/thanos-io/objstore/tlsconfig.go @@ -7,7 +7,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" ) // NewTLSConfig creates a new tls.Config from the given TLSConfig. @@ -46,7 +46,7 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } diff --git a/vendor/github.com/thanos-io/objstore/tracing/tracing.go b/vendor/github.com/thanos-io/objstore/tracing/tracing.go index f0214206958..9beb2bd8133 100644 --- a/vendor/github.com/thanos-io/objstore/tracing/tracing.go +++ b/vendor/github.com/thanos-io/objstore/tracing/tracing.go @@ -29,6 +29,11 @@ type Tracer interface { GetTraceIDFromSpanContext(ctx opentracing.SpanContext) (string, bool) } +// ContextWithTracer returns a new `context.Context` that holds a reference to given opentracing.Tracer. +func ContextWithTracer(ctx context.Context, tracer opentracing.Tracer) context.Context { + return context.WithValue(ctx, tracerKey, tracer) +} + // tracerFromContext extracts opentracing.Tracer from the given context. func tracerFromContext(ctx context.Context) opentracing.Tracer { val := ctx.Value(tracerKey) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 7886e5d8693..5edf27f2423 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -402,3 +402,34 @@ func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucke level.Info(logger).Log("msg", "block has been marked for no compaction", "block", id) return nil } + +// MarkForNoDownsample creates a file which marks block to be not downsampled. 
+func MarkForNoDownsample(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason metadata.NoDownsampleReason, details string, markedForNoDownsample prometheus.Counter) error { + m := path.Join(id.String(), metadata.NoDownsampleMarkFilename) + noDownsampleMarkExists, err := bkt.Exists(ctx, m) + if err != nil { + return errors.Wrapf(err, "check exists %s in bucket", m) + } + if noDownsampleMarkExists { + level.Warn(logger).Log("msg", "requested to mark for no downsample, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", m)) + return nil + } + noDownsampleMark, err := json.Marshal(metadata.NoDownsampleMark{ + ID: id, + Version: metadata.NoDownsampleMarkVersion1, + + NoDownsampleTime: time.Now().Unix(), + Reason: reason, + Details: details, + }) + if err != nil { + return errors.Wrap(err, "json encode no downsample mark") + } + + if err := bkt.Upload(ctx, m, bytes.NewBuffer(noDownsampleMark)); err != nil { + return errors.Wrapf(err, "upload file %s to bucket", m) + } + markedForNoDownsample.Inc() + level.Info(logger).Log("msg", "block has been marked for no downsample", "block", id) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 3a228f994f2..a87f74a57a6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -80,6 +80,9 @@ const ( // MarkedForNoCompactionMeta is label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric. MarkedForNoCompactionMeta = "marked-for-no-compact" + // MarkedForNoDownsampleMeta is label for blocks which are loaded but also marked for no downsample. This label is also counted in `loaded` label metric. + MarkedForNoDownsampleMeta = "marked-for-no-downsample" + // Modified label values. replicaRemovedMeta = "replica-label-removed" ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go index 8af9a5a1f87..83273eb3430 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go @@ -24,11 +24,15 @@ const ( // NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction. // If such file is present in block dir, it means the block has to be excluded from compaction (both vertical and horizontal) or rewrite (e.g. deletions). NoCompactMarkFilename = "no-compact-mark.json" - + // NoDownsampleMarkFilename is the known json filename for optional file storing details about why block has to be excluded from downsampling. + // If such file is present in block dir, it means the block has to be excluded from downsampling. + NoDownsampleMarkFilename = "no-downsample-mark.json" + // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. DeletionMarkVersion1 = 1 // NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos. NoCompactMarkVersion1 = 1 + // NoDownsampleMarkVersion1 is the version of no-downsample-mark file supported by Thanos. + NoDownsampleMarkVersion1 = 1 ) var ( @@ -62,9 +66,14 @@ func (m *DeletionMark) markerFilename() string { return DeletionMarkFilename } // NoCompactReason is a reason for a block to be excluded from compaction.
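For context on the on-disk format: the marker uploaded by MarkForNoDownsample above is a small JSON object stored next to the block's files. A minimal, stdlib-only sketch of that payload, assuming a local stand-in struct for metadata.NoDownsampleMark (the real type uses ulid.ULID for the ID) and a made-up block ULID:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// noDownsampleMark mirrors the JSON shape of metadata.NoDownsampleMark
// for illustration only.
type noDownsampleMark struct {
	ID               string `json:"id"`
	Version          int    `json:"version"`
	Details          string `json:"details,omitempty"`
	NoDownsampleTime int64  `json:"no_downsample_time"`
	Reason           string `json:"reason"`
}

func main() {
	m := noDownsampleMark{
		ID:               "01ARZ3NDEKTSV4RRFFQ69G5FAV", // hypothetical block ULID
		Version:          1,                            // NoDownsampleMarkVersion1
		Details:          "marked manually by operator",
		NoDownsampleTime: time.Now().Unix(),
		Reason:           "manual", // ManualNoDownsampleReason
	}
	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	// This payload is what ends up in <block>/no-downsample-mark.json.
	fmt.Println(string(b))
}

Deleting that object from the bucket is what effectively un-marks the block again.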
type NoCompactReason string +// NoDownsampleReason is a reason for a block to be excluded from downsample. +type NoDownsampleReason string + const ( // ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason. ManualNoCompactReason NoCompactReason = "manual" + // ManualNoDownsampleReason is a custom reason of excluding from downsample that should be added when no-downsample mark is added for unknown/user specified reason. + ManualNoDownsampleReason NoDownsampleReason = "manual" // IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424) // This reason can be ignored when vertical block sharding will be implemented. IndexSizeExceedingNoCompactReason = "index-size-exceeding" @@ -88,6 +97,22 @@ type NoCompactMark struct { func (n *NoCompactMark) markerFilename() string { return NoCompactMarkFilename } +// NoDownsampleMark marker stores reason of block being excluded from downsample if needed. +type NoDownsampleMark struct { + // ID of the tsdb block. + ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // NoDownsampleTime is a unix timestamp of when the block was marked for no downsample. + NoDownsampleTime int64 `json:"no_downsample_time"` + Reason NoDownsampleReason `json:"reason"` +} + +func (n *NoDownsampleMark) markerFilename() string { return NoDownsampleMarkFilename } + // ReadMarker reads the given mark file from
<dir>/<marker filename>.json in bucket. func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.InstrumentedBucketReader, dir string, marker Marker) error { markerFile := path.Join(dir, marker.markerFilename()) @@ -113,6 +138,10 @@ func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.Instrumente if version := marker.(*NoCompactMark).Version; version != NoCompactMarkVersion1 { return errors.Errorf("unexpected no-compact-mark file version %d, expected %d", version, NoCompactMarkVersion1) } + case NoDownsampleMarkFilename: + if version := marker.(*NoDownsampleMark).Version; version != NoDownsampleMarkVersion1 { + return errors.Errorf("unexpected no-downsample-mark file version %d, expected %d", version, NoDownsampleMarkVersion1) + } case DeletionMarkFilename: if version := marker.(*DeletionMark).Version; version != DeletionMarkVersion1 { return errors.Errorf("unexpected deletion-mark file version %d, expected %d", version, DeletionMarkVersion1) diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 7731eab877d..4aa551ae3e9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -4,11 +4,13 @@ package downsample import ( + "context" "fmt" "math" "math/rand" "os" "path/filepath" + "sync" "time" "github.com/go-kit/log" @@ -22,7 +24,10 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/tsdbutil" + "golang.org/x/sync/errgroup" + "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/runutil" @@ -734,3 +739,106 @@ func SamplesFromTSDBSamples(samples []tsdbutil.Sample) []sample { } return res } + +// GatherNoDownsampleMarkFilter is a block.Fetcher filter that passes all metas. +// While doing it, it gathers all no-downsample-mark.json markers. +type GatherNoDownsampleMarkFilter struct { + logger log.Logger + bkt objstore.InstrumentedBucketReader + noDownsampleMarkedMap map[ulid.ULID]*metadata.NoDownsampleMark + concurrency int + mtx sync.Mutex +} + +// NewGatherNoDownsampleMarkFilter creates GatherNoDownsampleMarkFilter. +func NewGatherNoDownsampleMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader) *GatherNoDownsampleMarkFilter { + return &GatherNoDownsampleMarkFilter{ + logger: logger, + bkt: bkt, + concurrency: 1, + } +} + +// NoDownsampleMarkedBlocks returns block ids that were marked for no downsample. +func (f *GatherNoDownsampleMarkFilter) NoDownsampleMarkedBlocks() map[ulid.ULID]*metadata.NoDownsampleMark { + f.mtx.Lock() + copiedNoDownsampleMarked := make(map[ulid.ULID]*metadata.NoDownsampleMark, len(f.noDownsampleMarkedMap)) + for k, v := range f.noDownsampleMarkedMap { + copiedNoDownsampleMarked[k] = v + } + f.mtx.Unlock() + + return copiedNoDownsampleMarked +} + +// TODO (@rohitkochhar): reduce code duplication here by combining +// this code with that of GatherNoCompactionMarkFilter +// Filter passes all metas, while gathering no downsample markers.
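The Filter method that follows uses a standard errgroup fan-out: one producer goroutine feeds block IDs into a channel while f.concurrency workers drain it, recording markers under a mutex. A self-contained sketch of just that concurrency skeleton (the ID slice and the print statement stand in for reading markers; none of these names are Thanos APIs):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx := context.Background()
	ids := []string{"block-a", "block-b", "block-c", "block-d"}

	var eg errgroup.Group
	concurrency := 2
	ch := make(chan string, concurrency)

	// Workers: drain the channel until the producer closes it.
	for i := 0; i < concurrency; i++ {
		eg.Go(func() error {
			for id := range ch {
				fmt.Println("processing", id) // stand-in for reading a marker
			}
			return nil
		})
	}

	// Producer: distribute work, respecting cancellation.
	eg.Go(func() error {
		defer close(ch)
		for _, id := range ids {
			select {
			case ch <- id:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	if err := eg.Wait(); err != nil {
		fmt.Println("filter failed:", err)
	}
}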
+func (f *GatherNoDownsampleMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced block.GaugeVec, modified block.GaugeVec) error { + f.mtx.Lock() + f.noDownsampleMarkedMap = make(map[ulid.ULID]*metadata.NoDownsampleMark) + f.mtx.Unlock() + + // Make a copy of block IDs to check, in order to avoid concurrency issues + // between the scheduler and workers. + blockIDs := make([]ulid.ULID, 0, len(metas)) + for id := range metas { + blockIDs = append(blockIDs, id) + } + + var ( + eg errgroup.Group + ch = make(chan ulid.ULID, f.concurrency) + ) + + for i := 0; i < f.concurrency; i++ { + eg.Go(func() error { + var lastErr error + for id := range ch { + m := &metadata.NoDownsampleMark{} + + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial no-downsample-mark.json; if we will see it happening often for the same block, consider manually deleting no-downsample-mark.json from the object storage", "block", id, "err", err) + continue + } + // Remember the last error and continue draining the channel. + lastErr = err + continue + } + + f.mtx.Lock() + f.noDownsampleMarkedMap[id] = m + f.mtx.Unlock() + synced.WithLabelValues(block.MarkedForNoDownsampleMeta).Inc() + } + + return lastErr + }) + } + + // Workers scheduled, distribute blocks. + eg.Go(func() error { + defer close(ch) + + for _, id := range blockIDs { + select { + case ch <- id: + // Nothing to do. + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil + }) + + if err := eg.Wait(); err != nil { + return errors.Wrap(err, "filter blocks marked for no downsample") + } + + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index a7a5d3abd96..b1fbd898a25 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -37,6 +37,8 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/index" "golang.org/x/sync/errgroup" + + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -101,6 +103,9 @@ const ( minBlockSyncConcurrency = 1 enableChunkHashCalculation = true + + // SeriesBatchSize is the default batch size when fetching series from object storage. 
+ SeriesBatchSize = 10000 ) var ( @@ -137,6 +142,7 @@ type bucketStoreMetrics struct { seriesFetchDuration prometheus.Histogram postingsFetchDuration prometheus.Histogram + chunkFetchDuration prometheus.Histogram } func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { @@ -168,21 +174,25 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { }) m.seriesDataTouched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_touched", - Help: "How many items of a data type in a block were touched for a single series request.", + Name: "thanos_bucket_store_series_data_touched", + Help: "Number of items of a data type touched to fulfill a single Store API series request.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataFetched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_fetched", - Help: "How many items of a data type in a block were fetched for a single series request.", + Name: "thanos_bucket_store_series_data_fetched", + Help: "Number of items of a data type retrieved to fulfill a single Store API series request.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataSizeTouched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_size_touched_bytes", - Help: "Size of all items of a data type in a block were touched for a single series request.", + Name: "thanos_bucket_store_series_data_size_touched_bytes", + Help: "Total size of items of a data type touched to fulfill a single Store API series request in Bytes.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesDataSizeFetched = promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{ - Name: "thanos_bucket_store_series_data_size_fetched_bytes", - Help: "Size of all items of a data type in a block were fetched for a single series request.", + Name: "thanos_bucket_store_series_data_size_fetched_bytes", + Help: "Total size of items of a data type fetched to fulfill a single Store API series request in Bytes.", + Objectives: map[float64]float64{0.50: 0.1, 0.95: 0.1, 0.99: 0.001}, }, []string{"data_type"}) m.seriesBlocksQueried = promauto.With(reg).NewSummary(prometheus.SummaryOpts{ @@ -271,6 +281,12 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, }) + m.chunkFetchDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "thanos_bucket_store_chunks_fetch_duration_seconds", + Help: "The total time spent fetching chunks within a single request to a store gateway.", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + }) + m.emptyPostingCount = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_empty_postings_total", Help: "Total number of empty postings when fetching block series.", @@ -301,6 +317,7 @@ type BucketStore struct { indexReaderPool *indexheader.ReaderPool buffers sync.Pool chunkPool pool.Bytes + seriesBatchSize int // Sets of blocks that have the same labels. They are indexed by a hash over their label set.
mtx sync.RWMutex @@ -415,6 +432,12 @@ func WithChunkHashCalculation(enableChunkHashCalculation bool) BucketStoreOption } } +func WithSeriesBatchSize(seriesBatchSize int) BucketStoreOption { + return func(s *BucketStore) { + s.seriesBatchSize = seriesBatchSize + } +} + // NewBucketStore creates a new bucket backed store that implements the store API against // an object store bucket. It is optimized to work against high latency backends. func NewBucketStore( @@ -456,6 +479,7 @@ func NewBucketStore( postingOffsetsInMemSampling: postingOffsetsInMemSampling, enableSeriesResponseHints: enableSeriesResponseHints, enableChunkHashCalculation: enableChunkHashCalculation, + seriesBatchSize: SeriesBatchSize, } for _, option := range options { @@ -782,140 +806,218 @@ type seriesEntry struct { chks []storepb.AggrChunk } -type bucketSeriesSet struct { - set []seriesEntry - i int - err error -} +// blockSeriesClient is a storepb.Store_SeriesClient for a +// single TSDB block in object storage. +type blockSeriesClient struct { + grpc.ClientStream + ctx context.Context + logger log.Logger + extLset labels.Labels + + mint int64 + maxt int64 + indexr *bucketIndexReader + chunkr *bucketChunkReader + loadAggregates []storepb.Aggr + chunksLimiter ChunksLimiter + bytesLimiter BytesLimiter + + skipChunks bool + shardMatcher *storepb.ShardMatcher + calculateChunkHash bool + chunkFetchDuration prometheus.Histogram + + // Internal state. + i uint64 + postings []storage.SeriesRef + chkMetas []chunks.Meta + lset labels.Labels + symbolizedLset []symbolizedLabel + entries []seriesEntry + hasMorePostings bool + batchSize int +} + +func newBlockSeriesClient( + ctx context.Context, + logger log.Logger, + b *bucketBlock, + req *storepb.SeriesRequest, + limiter ChunksLimiter, + bytesLimiter BytesLimiter, + shardMatcher *storepb.ShardMatcher, + calculateChunkHash bool, + batchSize int, + chunkFetchDuration prometheus.Histogram, +) *blockSeriesClient { + var chunkr *bucketChunkReader + if !req.SkipChunks { + chunkr = b.chunkReader() + } -func newBucketSeriesSet(set []seriesEntry) *bucketSeriesSet { - return &bucketSeriesSet{ - set: set, - i: -1, + return &blockSeriesClient{ + ctx: ctx, + logger: logger, + extLset: b.extLset, + mint: req.MinTime, + maxt: req.MaxTime, + indexr: b.indexReader(), + chunkr: chunkr, + chunksLimiter: limiter, + bytesLimiter: bytesLimiter, + skipChunks: req.SkipChunks, + chunkFetchDuration: chunkFetchDuration, + + loadAggregates: req.Aggregates, + shardMatcher: shardMatcher, + calculateChunkHash: calculateChunkHash, + hasMorePostings: true, + batchSize: batchSize, } } -func (s *bucketSeriesSet) Next() bool { - if s.i >= len(s.set)-1 { - return false +func (b *blockSeriesClient) Close() { + if !b.skipChunks { + runutil.CloseWithLogOnErr(b.logger, b.chunkr, "series block") } - s.i++ - return true -} -func (s *bucketSeriesSet) At() (labels.Labels, []storepb.AggrChunk) { - return s.set[s.i].lset, s.set[s.i].chks + runutil.CloseWithLogOnErr(b.logger, b.indexr, "series block") } -func (s *bucketSeriesSet) Err() error { - return s.err +func (b *blockSeriesClient) MergeStats(stats *queryStats) *queryStats { + stats = stats.merge(b.indexr.stats) + if !b.skipChunks { + stats = stats.merge(b.chunkr.stats) + } + return stats } -// blockSeries returns series matching given matchers, that have some data in given time range. 
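The removed blockSeries below loaded every matching series up front and returned one big SeriesSet; the blockSeriesClient that replaces it pages through the expanded postings batch by batch and exposes results through a gRPC-style Recv that terminates with io.EOF. A stripped-down sketch of that iteration pattern, with ints standing in for series entries (batchClient is illustrative, not the vendored type):

package main

import (
	"fmt"
	"io"
)

// batchClient streams items from a pre-expanded list in small batches,
// mimicking how blockSeriesClient pages through postings.
type batchClient struct {
	postings  []int // stand-in for []storage.SeriesRef
	i         int
	batchSize int
	entries   []int
}

func (b *batchClient) nextBatch() {
	end := b.i + b.batchSize
	if end > len(b.postings) {
		end = len(b.postings)
	}
	// In the real client this is where PreloadSeries and chunk
	// scheduling happen, for just this slice of postings.
	b.entries = append(b.entries[:0], b.postings[b.i:end]...)
	b.i = end
}

func (b *batchClient) Recv() (int, error) {
	if len(b.entries) == 0 && b.i < len(b.postings) {
		b.nextBatch()
	}
	if len(b.entries) == 0 {
		return 0, io.EOF // stream exhausted, like a gRPC client stream
	}
	next := b.entries[0]
	b.entries = b.entries[1:]
	return next, nil
}

func main() {
	c := &batchClient{postings: []int{1, 2, 3, 4, 5}, batchSize: 2}
	for {
		v, err := c.Recv()
		if err == io.EOF {
			break
		}
		fmt.Println("series", v)
	}
}

The payoff of this shape is that only one batch of series and chunks is resident in memory at a time, instead of the whole result set.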
-func blockSeries( - ctx context.Context, - extLset labels.Labels, - indexr *bucketIndexReader, - chunkr *bucketChunkReader, +func (b *blockSeriesClient) ExpandPostings( matchers []*labels.Matcher, - chunksLimiter ChunksLimiter, seriesLimiter SeriesLimiter, - bytesLimiter BytesLimiter, // Rate limiter for used bytes. - skipChunks bool, - minTime, maxTime int64, - loadAggregates []storepb.Aggr, - shardMatcher *storepb.ShardMatcher, - emptyPostingsCount prometheus.Counter, - calculateChunkHash bool, -) (storepb.SeriesSet, *queryStats, error) { - ps, err := indexr.ExpandedPostings(ctx, matchers, bytesLimiter) +) error { + ps, err := b.indexr.ExpandedPostings(b.ctx, matchers, b.bytesLimiter) if err != nil { - return nil, nil, errors.Wrap(err, "expanded matching posting") + return errors.Wrap(err, "expanded matching posting") } if len(ps) == 0 { - emptyPostingsCount.Inc() - return storepb.EmptySeriesSet(), indexr.stats, nil + return nil } - // Reserve series seriesLimiter if err := seriesLimiter.Reserve(uint64(len(ps))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded series limit") + return errors.Wrap(err, "exceeded series limit") } - // Preload all series index data. - // TODO(bwplotka): Consider not keeping all series in memory all the time. - // TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method. - if err := indexr.PreloadSeries(ctx, ps, bytesLimiter); err != nil { - return nil, nil, errors.Wrap(err, "preload series") + b.postings = ps + if b.batchSize > len(ps) { + b.batchSize = len(ps) } + b.entries = make([]seriesEntry, 0, b.batchSize) + return nil +} - // Transform all series into the response types and mark their relevant chunks - // for preloading. - var ( - res []seriesEntry - symbolizedLset []symbolizedLabel - lset labels.Labels - chks []chunks.Meta - ) +func (b *blockSeriesClient) Recv() (*storepb.SeriesResponse, error) { + for len(b.entries) == 0 && b.hasMorePostings { + if err := b.nextBatch(); err != nil { + return nil, err + } + } + + if len(b.entries) == 0 { + if b.chunkr != nil { + b.chunkFetchDuration.Observe(b.chunkr.stats.ChunksFetchDurationSum.Seconds()) + } + return nil, io.EOF + } + + next := b.entries[0] + b.entries = b.entries[1:] + + return storepb.NewSeriesResponse(&storepb.Series{ + Labels: labelpb.ZLabelsFromPromLabels(next.lset), + Chunks: next.chks, + }), nil +} + +func (b *blockSeriesClient) nextBatch() error { + start := b.i + end := start + uint64(b.batchSize) + if end > uint64(len(b.postings)) { + end = uint64(len(b.postings)) + } + b.i = end + + postingsBatch := b.postings[start:end] + if len(postingsBatch) == 0 { + b.hasMorePostings = false + return nil + } + + b.indexr.reset() + if !b.skipChunks { + b.chunkr.reset() + } - for _, id := range ps { - ok, err := indexr.LoadSeriesForTime(id, &symbolizedLset, &chks, skipChunks, minTime, maxTime) + if err := b.indexr.PreloadSeries(b.ctx, postingsBatch, b.bytesLimiter); err != nil { + return errors.Wrap(err, "preload series") + } + + b.entries = b.entries[:0] + for i := 0; i < len(postingsBatch); i++ { + ok, err := b.indexr.LoadSeriesForTime(postingsBatch[i], &b.symbolizedLset, &b.chkMetas, b.skipChunks, b.mint, b.maxt) if err != nil { - return nil, nil, errors.Wrap(err, "read series") + return errors.Wrap(err, "read series") } if !ok { - // No matching chunks for this time duration, skip series.
continue } - if err := indexr.LookupLabelsSymbols(symbolizedLset, &lset); err != nil { - return nil, nil, errors.Wrap(err, "Lookup labels symbols") + if err := b.indexr.LookupLabelsSymbols(b.symbolizedLset, &b.lset); err != nil { + return errors.Wrap(err, "Lookup labels symbols") } - completeLabelset := labelpb.ExtendSortedLabels(lset, extLset) - if !shardMatcher.MatchesLabels(completeLabelset) { + completeLabelset := labelpb.ExtendSortedLabels(b.lset, b.extLset) + if !b.shardMatcher.MatchesLabels(completeLabelset) { continue } - s := seriesEntry{} - s.lset = completeLabelset + s := seriesEntry{lset: completeLabelset} + if b.skipChunks { + b.entries = append(b.entries, s) + continue + } - if !skipChunks { - // Schedule loading chunks. - s.refs = make([]chunks.ChunkRef, 0, len(chks)) - s.chks = make([]storepb.AggrChunk, 0, len(chks)) - for j, meta := range chks { - // seriesEntry s is appended to res, but not at every outer loop iteration, - // therefore len(res) is the index we need here, not outer loop iteration number. - if err := chunkr.addLoad(meta.Ref, len(res), j); err != nil { - return nil, nil, errors.Wrap(err, "add chunk load") - } - s.chks = append(s.chks, storepb.AggrChunk{ - MinTime: meta.MinTime, - MaxTime: meta.MaxTime, - }) - s.refs = append(s.refs, meta.Ref) - } + // Schedule loading chunks. + s.refs = make([]chunks.ChunkRef, 0, len(b.chkMetas)) + s.chks = make([]storepb.AggrChunk, 0, len(b.chkMetas)) - // Ensure sample limit through chunksLimiter if we return chunks. - if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded chunks limit") + for j, meta := range b.chkMetas { + if err := b.chunkr.addLoad(meta.Ref, len(b.entries), j); err != nil { + return errors.Wrap(err, "add chunk load") } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) } - res = append(res, s) - } + // Ensure sample limit through chunksLimiter if we return chunks. + if err := b.chunksLimiter.Reserve(uint64(len(b.chkMetas))); err != nil { + return errors.Wrap(err, "exceeded chunks limit") + } - if skipChunks { - return newBucketSeriesSet(res), indexr.stats, nil + b.entries = append(b.entries, s) } - if err := chunkr.load(ctx, res, loadAggregates, calculateChunkHash, bytesLimiter); err != nil { - return nil, nil, errors.Wrap(err, "load chunks") + if !b.skipChunks { + if err := b.chunkr.load(b.ctx, b.entries, b.loadAggregates, b.calculateChunkHash, b.bytesLimiter); err != nil { + return errors.Wrap(err, "load chunks") + } } - return newBucketSeriesSet(res), indexr.stats.merge(chunkr.stats), nil + return nil } func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Aggr, save func([]byte) ([]byte, error), calculateChecksum bool) error { @@ -1062,7 +1164,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie bytesLimiter = s.bytesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("bytes")) ctx = srv.Context() stats = &queryStats{} - res []storepb.SeriesSet + respSets []respSet mtx sync.Mutex g, gctx = errgroup.WithContext(ctx) resHints = &hintspb.SeriesResponseHints{} @@ -1097,64 +1199,63 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } for _, b := range blocks { - b := b + blk := b gctx := gctx if s.enableSeriesResponseHints { // Keep track of queried blocks. 
- resHints.AddQueriedBlock(b.meta.ULID) - } - - var chunkr *bucketChunkReader - // We must keep the readers open until all their data has been sent. - indexr := b.indexReader() - if !req.SkipChunks { - chunkr = b.chunkReader() - defer runutil.CloseWithLogOnErr(s.logger, chunkr, "series block") + resHints.AddQueriedBlock(blk.meta.ULID) } - // Defer all closes to the end of Series method. - defer runutil.CloseWithLogOnErr(s.logger, indexr, "series block") + shardMatcher := req.ShardInfo.Matcher(&s.buffers) + blockClient := newBlockSeriesClient( + srv.Context(), + s.logger, + blk, + req, + chunksLimiter, + bytesLimiter, + shardMatcher, + s.enableChunkHashCalculation, + s.seriesBatchSize, + s.metrics.chunkFetchDuration, + ) + defer blockClient.Close() g.Go(func() error { - span, newCtx := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ - "block.id": b.meta.ULID, - "block.mint": b.meta.MinTime, - "block.maxt": b.meta.MaxTime, - "block.resolution": b.meta.Thanos.Downsample.Resolution, + span, _ := tracing.StartSpan(gctx, "bucket_store_block_series", tracing.Tags{ + "block.id": blk.meta.ULID, + "block.mint": blk.meta.MinTime, + "block.maxt": blk.meta.MaxTime, + "block.resolution": blk.meta.Thanos.Downsample.Resolution, }) - defer span.Finish() - - shardMatcher := req.ShardInfo.Matcher(&s.buffers) - defer shardMatcher.Close() - part, pstats, err := blockSeries( - newCtx, - b.extLset, - indexr, - chunkr, - blockMatchers, - chunksLimiter, - seriesLimiter, - bytesLimiter, - req.SkipChunks, - req.MinTime, req.MaxTime, - req.Aggregates, + + if err := blockClient.ExpandPostings(blockMatchers, seriesLimiter); err != nil { + span.Finish() + return errors.Wrapf(err, "fetch series for block %s", blk.meta.ULID) + } + onClose := func() { + mtx.Lock() + stats = blockClient.MergeStats(stats) + mtx.Unlock() + } + part := newLazyRespSet( + srv.Context(), + span, + 10*time.Minute, + blk.meta.ULID.String(), + []labels.Labels{blk.extLset}, + onClose, + blockClient, shardMatcher, + false, s.metrics.emptyPostingCount, - s.enableChunkHashCalculation, ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) - } mtx.Lock() - res = append(res, part) - stats = stats.merge(pstats) + respSets = append(respSets, part) mtx.Unlock() - // No info about samples exactly, so pass at least chunks. - span.SetTag("processed.series", len(indexr.loadedSeries)) - span.SetTag("processed.chunks", pstats.chunksFetched) return nil }) } @@ -1204,47 +1305,53 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } return status.Error(code, err.Error()) } - stats.blocksQueried = len(res) + stats.blocksQueried = len(respSets) stats.GetAllDuration = time.Since(begin) s.metrics.seriesGetAllDuration.Observe(stats.GetAllDuration.Seconds()) s.metrics.seriesBlocksQueried.Observe(float64(stats.blocksQueried)) } + // Merge the sub-results from each selected block. tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { + defer func() { + for _, resp := range respSets { + resp.Close() + } + }() begin := time.Now() - - // NOTE: We "carefully" assume series and chunks are sorted within each SeriesSet. This should be guaranteed by - // blockSeries method. In worst case deduplication logic won't deduplicate correctly, which will be accounted later. - set := storepb.MergeSeriesSets(res...) 
+ set := NewDedupResponseHeap(NewProxyResponseHeap(respSets...)) for set.Next() { - var series storepb.Series - - stats.mergedSeriesCount++ - - var lset labels.Labels - if req.SkipChunks { - lset, _ = set.At() - } else { - lset, series.Chunks = set.At() + at := set.At() + warn := at.GetWarning() + if warn != "" { + // TODO(fpetkovski): Consider deprecating string based warnings in favor of a + // separate protobuf message containing the grpc code and + // a human readable error message. + err = status.Error(storepb.GRPCCodeFromWarn(warn), at.GetWarning()) + return + } - stats.mergedChunksCount += len(series.Chunks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + series := at.GetSeries() + if series != nil { + stats.mergedSeriesCount++ + if !req.SkipChunks { + stats.mergedChunksCount += len(series.Chunks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + } } - series.Labels = labelpb.ZLabelsFromPromLabels(lset) - if err = srv.Send(storepb.NewSeriesResponse(&series)); err != nil { + if err = srv.Send(at); err != nil { err = status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) return } } - if set.Err() != nil { - err = status.Error(codes.Unknown, errors.Wrap(set.Err(), "expand series set").Error()) - return - } stats.MergeDuration = time.Since(begin) s.metrics.seriesMergeDuration.Observe(stats.MergeDuration.Seconds()) err = nil }) + if err != nil { + return err + } if s.enableSeriesResponseHints { var anyHints *types.Any @@ -1342,7 +1449,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq } // Add a set for the external labels as well. - // We're not adding them directly to res because there could be duplicates. + // We're not adding them directly to refs because there could be duplicates. // b.extLset is already sorted by label name, no need to sort it again. extRes := make([]string, 0, len(b.extLset)) for _, l := range b.extLset { @@ -1351,40 +1458,43 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq result = strutil.MergeSlices(res, extRes) } else { - seriesSet, _, err := blockSeries( - newCtx, - b.extLset, - indexr, - nil, + seriesReq := &storepb.SeriesRequest{ + MinTime: req.Start, + MaxTime: req.End, + SkipChunks: true, + } + blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + + if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, - nil, seriesLimiter, - bytesLimiter, - true, - req.Start, - req.End, - nil, - nil, - s.metrics.emptyPostingCount, - false, - ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) + ); err != nil { + return err } // Extract label names from all series. Many label names will be the same, so we need to deduplicate them. // Note that label names will already include external labels (passed to blockSeries), so we don't need // to add them again. 
labelNames := map[string]struct{}{} - for seriesSet.Next() { - ls, _ := seriesSet.At() - for _, l := range ls { + for { + ls, err := blockClient.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrapf(err, "iterate series for block %s", b.meta.ULID) + } + + if ls.GetWarning() != "" { + return errors.Wrapf(errors.New(ls.GetWarning()), "iterate series for block %s", b.meta.ULID) + } + if ls.GetSeries() == nil { + continue + } + for _, l := range ls.GetSeries().Labels { labelNames[l.Name] = struct{}{} } } - if seriesSet.Err() != nil { - return errors.Wrapf(seriesSet.Err(), "iterate series for block %s", b.meta.ULID) - } result = make([]string, 0, len(labelNames)) for n := range labelNames { @@ -1522,40 +1632,44 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR } result = res } else { - seriesSet, _, err := blockSeries( - newCtx, - b.extLset, - indexr, - nil, + seriesReq := &storepb.SeriesRequest{ + MinTime: req.Start, + MaxTime: req.End, + SkipChunks: true, + } + blockClient := newBlockSeriesClient(newCtx, s.logger, b, seriesReq, nil, bytesLimiter, nil, true, SeriesBatchSize, s.metrics.chunkFetchDuration) + + if err := blockClient.ExpandPostings( reqSeriesMatchersNoExtLabels, - nil, seriesLimiter, - bytesLimiter, - true, - req.Start, - req.End, - nil, - nil, - s.metrics.emptyPostingCount, - false, - ) - if err != nil { - return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) + ); err != nil { + return err } // Extract given label's value from all series and deduplicate them. // We don't need to deal with external labels, since they are already added by blockSeries. values := map[string]struct{}{} - for seriesSet.Next() { - ls, _ := seriesSet.At() - val := ls.Get(req.Label) + for { + ls, err := blockClient.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrapf(err, "iterate series for block %s", b.meta.ULID) + } + + if ls.GetWarning() != "" { + return errors.Wrapf(errors.New(ls.GetWarning()), "iterate series for block %s", b.meta.ULID) + } + if ls.GetSeries() == nil { + continue + } + + val := labelpb.ZLabelsToPromLabels(ls.GetSeries().Labels).Get(req.Label) if val != "" { // Should never be empty since we added labelName!="" matcher to the list of matchers. values[val] = struct{}{} } } - if seriesSet.Err() != nil { - return errors.Wrapf(seriesSet.Err(), "iterate series for block %s", b.meta.ULID) - } result = make([]string, 0, len(values)) for n := range values { @@ -1915,6 +2029,9 @@ func newBucketIndexReader(block *bucketBlock) *bucketIndexReader { } return r } +func (r *bucketIndexReader) reset() { + r.loadedSeries = map[storage.SeriesRef][]byte{} +} // ExpandedPostings returns postings in expanded list instead of index.Postings. // This is because we need to have them buffered anyway to perform efficient lookup @@ -2610,6 +2727,12 @@ func newBucketChunkReader(block *bucketBlock) *bucketChunkReader { } } +func (r *bucketChunkReader) reset() { + for i := range r.toLoad { + r.toLoad[i] = r.toLoad[i][:0] + } +} + func (r *bucketChunkReader) Close() error { r.block.pendingReaders.Done() @@ -2620,7 +2743,7 @@ func (r *bucketChunkReader) Close() error { } // addLoad adds the chunk with id to the data set to be fetched. -// Chunk will be fetched and saved to res[seriesEntry][chunk] upon r.load(res, <...>) call. +// Chunk will be fetched and saved to refs[seriesEntry][chunk] upon r.load(refs, <...>) call. 
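addLoad below decodes a chunks.ChunkRef by splitting the 64-bit reference in half: the upper 32 bits select the segment file (seq) and the lower 32 bits give the byte offset within it (off). A quick stdlib-only illustration of that packing, with arbitrary values:

package main

import "fmt"

func main() {
	// Pack: segment file 3, byte offset 0xBEEF, as TSDB chunk refs do.
	seq, off := uint64(3), uint64(0xBEEF)
	ref := seq<<32 | off

	// Unpack, mirroring addLoad: seq = int(id >> 32), off = uint32(id).
	gotSeq := int(ref >> 32)
	gotOff := uint32(ref)

	fmt.Printf("ref=%#x seq=%d off=%#x\n", ref, gotSeq, gotOff)
}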
func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunk int) error { var ( seq = int(id >> 32) @@ -2633,7 +2756,7 @@ func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunk int) return nil } -// load loads all added chunks and saves resulting aggrs to res. +// load loads all added chunks and saves resulting aggrs to refs. func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { g, ctx := errgroup.WithContext(ctx) @@ -2667,6 +2790,9 @@ func (r *bucketChunkReader) load(ctx context.Context, res []seriesEntry, aggrs [ // This data range covers chunks starting at supplied offsets. func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, aggrs []storepb.Aggr, seq int, part Part, pIdxs []loadIdx, calculateChunkChecksum bool, bytesLimiter BytesLimiter) error { fetchBegin := time.Now() + defer func() { + r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) + }() // Get a reader for the required range. reader, err := r.block.chunkRangeReader(ctx, seq, int64(part.Start), int64(part.End-part.Start)) @@ -2687,7 +2813,6 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a r.stats.chunksFetchCount++ r.stats.chunksFetched += len(pIdxs) - r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) r.stats.ChunksFetchedSizeSum += units.Base2Bytes(int(part.End - part.Start)) var ( @@ -2776,7 +2901,6 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a locked = true r.stats.chunksFetchCount++ - r.stats.ChunksFetchDurationSum += time.Since(fetchBegin) r.stats.ChunksFetchedSizeSum += units.Base2Bytes(len(*nb)) err = populateChunk(&(res[pIdx.seriesEntry].chks[pIdx.chunk]), rawChunk((*nb)[n:]), aggrs, r.save, calculateChunkChecksum) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go index d354d4db066..5cdb5a0b781 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go @@ -260,11 +260,11 @@ func (h *ProxyResponseHeap) At() *storepb.SeriesResponse { } func (l *lazyRespSet) StoreID() string { - return l.st.String() + return l.storeName } func (l *lazyRespSet) Labelset() string { - return labelpb.PromLabelSetsToString(l.st.LabelSets()) + return labelpb.PromLabelSetsToString(l.storeLabelSets) } // lazyRespSet is a lazy storepb.SeriesSet that buffers @@ -273,12 +273,13 @@ func (l *lazyRespSet) Labelset() string { // in Next(). type lazyRespSet struct { // Generic parameters. - span opentracing.Span - cl storepb.Store_SeriesClient - closeSeries context.CancelFunc - st Client - frameTimeout time.Duration - ctx context.Context + span opentracing.Span + cl storepb.Store_SeriesClient + closeSeries context.CancelFunc + storeName string + storeLabelSets []labels.Labels + frameTimeout time.Duration + ctx context.Context // Internal bookkeeping. 
dataOrFinishEvent *sync.Cond @@ -358,13 +359,13 @@ func newLazyRespSet( ctx context.Context, span opentracing.Span, frameTimeout time.Duration, - st Client, + storeName string, + storeLabelSets []labels.Labels, closeSeries context.CancelFunc, cl storepb.Store_SeriesClient, shardMatcher *storepb.ShardMatcher, applySharding bool, emptyStreamResponses prometheus.Counter, - ) respSet { bufferedResponses := []*storepb.SeriesResponse{} bufferedResponsesMtx := &sync.Mutex{} @@ -373,7 +374,8 @@ func newLazyRespSet( respSet := &lazyRespSet{ frameTimeout: frameTimeout, cl: cl, - st: st, + storeName: storeName, + storeLabelSets: storeLabelSets, closeSeries: closeSeries, span: span, ctx: ctx, @@ -383,7 +385,7 @@ func newLazyRespSet( shardMatcher: shardMatcher, } - go func(st Client, l *lazyRespSet) { + go func(st string, l *lazyRespSet) { bytesProcessed := 0 seriesStats := &storepb.SeriesStatsCounter{} @@ -409,7 +411,7 @@ func newLazyRespSet( select { case <-l.ctx.Done(): - err := errors.Wrapf(l.ctx.Err(), "failed to receive any data from %s", st.String()) + err := errors.Wrapf(l.ctx.Err(), "failed to receive any data from %s", st) l.span.SetTag("err", err.Error()) l.bufferedResponsesMtx.Lock() @@ -434,9 +436,9 @@ func newLazyRespSet( // Most likely the per-Recv timeout has been reached. // There's a small race between canceling and the Recv() // but this is most likely true. - rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st.String()) + rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st) } else { - rerr = errors.Wrapf(err, "receive series from %s", st.String()) + rerr = errors.Wrapf(err, "receive series from %s", st) } l.span.SetTag("err", rerr.Error()) @@ -478,7 +480,7 @@ func newLazyRespSet( return } } - }(st, respSet) + }(storeName, respSet) return respSet } @@ -552,7 +554,8 @@ func newAsyncRespSet(ctx context.Context, seriesCtx, span, frameTimeout, - st, + st.String(), + st.LabelSets(), closeSeries, cl, shardMatcher, diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index eaa96f1ede6..c1f4b9b8bfe 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -14,6 +14,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" + "google.golang.org/grpc/codes" "github.com/thanos-io/thanos/pkg/store/labelpb" ) @@ -51,6 +52,16 @@ func NewHintsSeriesResponse(hints *types.Any) *SeriesResponse { } } +func GRPCCodeFromWarn(warn string) codes.Code { + if strings.Contains(warn, "rpc error: code = ResourceExhausted") { + return codes.ResourceExhausted + } + if strings.Contains(warn, "rpc error: code = Code(422)") { + return 422 + } + return codes.Unknown +} + type emptySeriesSet struct{} func (emptySeriesSet) Next() bool { return false } diff --git a/vendor/modules.txt b/vendor/modules.txt index 33174cb5a8c..eb936cc0252 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -259,12 +259,14 @@ github.com/dustin/go-humanize # github.com/edsrzf/mmap-go v1.1.0 ## explicit; go 1.17 github.com/edsrzf/mmap-go -# github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd -## explicit; go 1.15 -github.com/efficientgo/tools/core/pkg/errcapture -github.com/efficientgo/tools/core/pkg/logerrcapture -github.com/efficientgo/tools/core/pkg/merrors -github.com/efficientgo/tools/core/pkg/testutil +# 
github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 +## explicit; go 1.17 +github.com/efficientgo/core/errcapture +github.com/efficientgo/core/errors +github.com/efficientgo/core/logerrcapture +github.com/efficientgo/core/merrors +github.com/efficientgo/core/testutil +github.com/efficientgo/core/testutil/internal # github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb ## explicit github.com/facette/natsort @@ -535,7 +537,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.37 +# github.com/minio/minio-go/v7 v7.0.45 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials @@ -667,7 +669,7 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.7.1 +# github.com/prometheus/exporter-toolkit v0.7.3 ## explicit; go 1.14 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.8.0 @@ -766,7 +768,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 +# github.com/thanos-io/objstore v0.0.0-20221205132204-5aafc0079f06 ## explicit; go 1.18 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -776,7 +778,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.29.1-0.20221115064008-fe45cfc66b7d +# github.com/thanos-io/thanos v0.29.1-0.20221215161655-4054531421b5 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader
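Usage-wise, GRPCCodeFromWarn (added to storepb/custom.go above) lets the store gateway surface ResourceExhausted or the custom 422 code instead of a blanket Unknown when a proxied warning string carries an embedded gRPC status. A small sketch, assuming the vendored package is importable from this module; the warning strings are fabricated examples of the formats it matches:

package main

import (
	"fmt"

	"github.com/thanos-io/thanos/pkg/store/storepb"
)

func main() {
	// The first two warnings carry an embedded status, the last one does not.
	warns := []string{
		"rpc error: code = ResourceExhausted desc = series limit reached",
		"rpc error: code = Code(422) desc = unprocessable request",
		"receive series from store: connection reset",
	}
	for _, w := range warns {
		// Prints ResourceExhausted, Code(422) and Unknown respectively.
		fmt.Printf("%v <- %q\n", storepb.GRPCCodeFromWarn(w), w)
	}
}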