From 28cf3b6c5deb5fd5022ad067e4f4602a98df9153 Mon Sep 17 00:00:00 2001 From: Albert Date: Fri, 9 Jul 2021 19:27:18 -0700 Subject: [PATCH 001/137] add shuffle sharding grouper/planner Signed-off-by: Albert Signed-off-by: Alvin Lin --- docs/blocks-storage/compactor.md | 5 + docs/configuration/config-file-reference.md | 4 + pkg/compactor/compactor.go | 49 +- pkg/compactor/shuffle_sharding_grouper.go | 379 +++++++++++++ .../shuffle_sharding_grouper_test.go | 496 ++++++++++++++++++ pkg/compactor/shuffle_sharding_planner.go | 39 ++ 6 files changed, 968 insertions(+), 4 deletions(-) create mode 100644 pkg/compactor/shuffle_sharding_grouper.go create mode 100644 pkg/compactor/shuffle_sharding_grouper_test.go create mode 100644 pkg/compactor/shuffle_sharding_planner.go diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index 213405822c8..6579ad4c663 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -173,6 +173,11 @@ compactor: # CLI flag: -compactor.sharding-enabled [sharding_enabled: | default = false] + # The sharding strategy to use. Supported values are: default, + # shuffle-sharding. + # CLI flag: -compactor.sharding-strategy + [sharding_strategy: | default = "default"] + sharding_ring: kvstore: # Backend storage to use for the ring. Supported values are: consul, etcd, diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 9b20d81bea4..692349cc086 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -5138,6 +5138,10 @@ The `compactor_config` configures the compactor for the blocks storage. # CLI flag: -compactor.sharding-enabled [sharding_enabled: | default = false] +# The sharding strategy to use. Supported values are: default, shuffle-sharding. +# CLI flag: -compactor.sharding-strategy +[sharding_strategy: | default = "default"] + sharding_ring: kvstore: # Backend storage to use for the ring. 
Supported values are: consul, etcd, diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index c41f7a9cccd..f67f0e941ea 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -44,6 +44,9 @@ var ( errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s" RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil) + supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} + errInvalidShardingStrategy = errors.New("invalid sharding strategy") + DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper { return compact.NewDefaultGrouper( logger, @@ -56,6 +59,19 @@ var ( metadata.NoneFunc) } + ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper { + return NewShuffleShardingGrouper( + logger, + bkt, + false, // Do not accept malformed indexes + true, // Enable vertical compaction + reg, + blocksMarkedForDeletion, + garbageCollectedBlocks, + metadata.NoneFunc, + cfg) + } + DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) { compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) if err != nil { @@ -65,6 +81,16 @@ var ( planner := compact.NewTSDBBasedPlanner(logger, cfg.BlockRanges.ToMilliseconds()) return compactor, planner, nil } + + ShuffleShardingBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) { + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) + if err != nil { + return nil, nil, err + } + + planner := NewShuffleShardingPlanner(logger, cfg.BlockRanges.ToMilliseconds()) + return compactor, planner, nil + } ) // BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks. @@ -108,8 +134,9 @@ type Config struct { DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` // Compactors sharding. - ShardingEnabled bool `yaml:"sharding_enabled"` - ShardingRing RingConfig `yaml:"sharding_ring"` + ShardingEnabled bool `yaml:"sharding_enabled"` + ShardingStrategy string `yaml:"sharding_strategy"` + ShardingRing RingConfig `yaml:"sharding_ring"` // No need to add options to customize the retry backoff, // given the defaults should be fine, but allow to override @@ -141,6 +168,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.CleanupInterval, "compactor.cleanup-interval", 15*time.Minute, "How frequently compactor should run blocks cleanup and maintenance, as well as update the bucket index.") f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks cleanup and maintenance should run concurrently.") f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. 
Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.") + f.StringVar(&cfg.ShardingStrategy, "compactor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+ "If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. "+ "If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.") @@ -159,6 +187,11 @@ func (cfg *Config) Validate() error { } } + // Make sure a valid sharding strategy is being used + if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + return errInvalidShardingStrategy + } + return nil } @@ -230,12 +263,20 @@ func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfi blocksGrouperFactory := compactorCfg.BlocksGrouperFactory if blocksGrouperFactory == nil { - blocksGrouperFactory = DefaultBlocksGrouperFactory + if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { + blocksGrouperFactory = ShuffleShardingGrouperFactory + } else { + blocksGrouperFactory = DefaultBlocksGrouperFactory + } } blocksCompactorFactory := compactorCfg.BlocksCompactorFactory if blocksCompactorFactory == nil { - blocksCompactorFactory = DefaultBlocksCompactorFactory + if compactorCfg.ShardingStrategy == util.ShardingStrategyShuffle { + blocksCompactorFactory = ShuffleShardingBlocksCompactorFactory + } else { + blocksCompactorFactory = DefaultBlocksCompactorFactory + } } cortexCompactor, err := newCompactor(compactorCfg, storageCfg, cfgProvider, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory) diff --git a/pkg/compactor/shuffle_sharding_grouper.go b/pkg/compactor/shuffle_sharding_grouper.go new file mode 100644 index 00000000000..82e14dcfc0e --- /dev/null +++ b/pkg/compactor/shuffle_sharding_grouper.go @@ -0,0 +1,379 @@ +package compactor + +import ( + "fmt" + "hash/fnv" + "sort" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/compact" + "github.com/thanos-io/thanos/pkg/objstore" +) + +type ShuffleShardingGrouper struct { + logger log.Logger + bkt objstore.Bucket + acceptMalformedIndex bool + enableVerticalCompaction bool + reg prometheus.Registerer + blocksMarkedForDeletion prometheus.Counter + garbageCollectedBlocks prometheus.Counter + hashFunc metadata.HashFunc + compactions *prometheus.CounterVec + compactionRunsStarted *prometheus.CounterVec + compactionRunsCompleted *prometheus.CounterVec + compactionFailures *prometheus.CounterVec + verticalCompactions *prometheus.CounterVec + compactorCfg Config +} + +func NewShuffleShardingGrouper( + logger log.Logger, + bkt objstore.Bucket, + acceptMalformedIndex bool, + enableVerticalCompaction bool, + reg prometheus.Registerer, + blocksMarkedForDeletion prometheus.Counter, + garbageCollectedBlocks 
prometheus.Counter, + hashFunc metadata.HashFunc, + compactorCfg Config, +) *ShuffleShardingGrouper { + if logger == nil { + logger = log.NewNopLogger() + } + + return &ShuffleShardingGrouper{ + logger: logger, + bkt: bkt, + acceptMalformedIndex: acceptMalformedIndex, + enableVerticalCompaction: enableVerticalCompaction, + reg: reg, + blocksMarkedForDeletion: blocksMarkedForDeletion, + garbageCollectedBlocks: garbageCollectedBlocks, + hashFunc: hashFunc, + compactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_compact_group_compactions_total", + Help: "Total number of group compaction attempts that resulted in a new block.", + }, []string{"group"}), + compactionRunsStarted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_compact_group_compaction_runs_started_total", + Help: "Total number of group compaction attempts.", + }, []string{"group"}), + compactionRunsCompleted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_compact_group_compaction_runs_completed_total", + Help: "Total number of group completed compaction runs. This also includes compactor group runs that resulted with no compaction.", + }, []string{"group"}), + compactionFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_compact_group_compactions_failures_total", + Help: "Total number of failed group compactions.", + }, []string{"group"}), + verticalCompactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_compact_group_vertical_compactions_total", + Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.", + }, []string{"group"}), + compactorCfg: compactorCfg, + } +} + +// Groups function modified from https://github.com/cortexproject/cortex/pull/2616 +func (g *ShuffleShardingGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*compact.Group, err error) { + // First of all we have to group blocks using the Thanos default + // grouping (based on downsample resolution + external labels). + mainGroups := map[string][]*metadata.Meta{} + for _, b := range blocks { + key := compact.DefaultGroupKey(b.Thanos) + mainGroups[key] = append(mainGroups[key], b) + } + + // For each group, we have to further split it into set of blocks + // which we can parallelly compact. + var outGroups []*compact.Group + + i := 0 + for _, mainBlocks := range mainGroups { + for _, group := range groupBlocksByCompactableRanges(mainBlocks, g.compactorCfg.BlockRanges.ToMilliseconds()) { + // Nothing to do if we don't have at least 2 blocks. + if len(group.blocks) < 2 { + continue + } + + // TODO: Use the group's hash to determine whether a compactor should be responsible for compacting that group + groupHash := hashGroup(group.blocks[0].Thanos.Labels["__org_id__"], group.rangeStart, group.rangeEnd) + + groupKey := fmt.Sprintf("%v%d", groupHash, i) + i++ + + level.Debug(g.logger).Log("msg", "found compactable group for user", "user", group.blocks[0].Thanos.Labels["__org_id__"], "plan", group.String()) + + // All the blocks within the same group have the same downsample + // resolution and external labels. 
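The `TODO` a few lines above only computes the group hash; how that hash maps to a specific compactor instance is left for follow-up work. The sketch below is purely illustrative (the `ownsGroup` policy is a hypothetical assumption, not something this patch implements): it recomputes the same FNV-1a hash and assigns each group to one of N compactors by modulo, which is one way the hash could later be used.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// Same recipe as the hashGroup helper added in this patch: FNV-1a over
// userID + rangeStart + rangeEnd, so every compactor derives the same
// hash for the same group.
func hashGroup(userID string, rangeStart, rangeEnd int64) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(fmt.Sprintf("%v%v%v", userID, rangeStart, rangeEnd)))
	return h.Sum32()
}

// ownsGroup is a hypothetical assignment policy (not part of this patch):
// compactor i of n takes the groups whose hash lands in its modulo bucket.
func ownsGroup(groupHash uint32, instanceIndex, totalInstances int) bool {
	return groupHash%uint32(totalInstances) == uint32(instanceIndex)
}

func main() {
	h := hashGroup("user-1", 0, 2*60*60*1000) // a 2h range starting at epoch
	fmt.Println(h, ownsGroup(h, 0, 3))
}
```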
+ resolution := group.blocks[0].Thanos.Downsample.Resolution + externalLabels := labels.FromMap(group.blocks[0].Thanos.Labels) + + thanosGroup, err := compact.NewGroup( + log.With(g.logger, "groupKey", groupKey, "rangeStart", group.rangeStartTime().String(), "rangeEnd", group.rangeEndTime().String(), "externalLabels", externalLabels, "downsampleResolution", resolution), + g.bkt, + groupKey, + externalLabels, + resolution, + false, // No malformed index. + true, // Enable vertical compaction. + g.compactions.WithLabelValues(groupKey), + g.compactionRunsStarted.WithLabelValues(groupKey), + g.compactionRunsCompleted.WithLabelValues(groupKey), + g.compactionFailures.WithLabelValues(groupKey), + g.verticalCompactions.WithLabelValues(groupKey), + g.garbageCollectedBlocks, + g.blocksMarkedForDeletion, + g.hashFunc, + ) + if err != nil { + return nil, errors.Wrap(err, "create compaction group") + } + + for _, m := range group.blocks { + if err := thanosGroup.AppendMeta(m); err != nil { + return nil, errors.Wrap(err, "add block to compaction group") + } + } + + outGroups = append(outGroups, thanosGroup) + } + } + + // Ensure groups are sorted by smallest range, oldest min time first. The rationale + // is that we wanna favor smaller ranges first (ie. to deduplicate samples sooner + // than later) and older ones are more likely to be "complete" (no missing block still + // to be uploaded). + sort.SliceStable(outGroups, func(i, j int) bool { + iLength := outGroups[i].MaxTime() - outGroups[i].MinTime() + jLength := outGroups[j].MaxTime() - outGroups[j].MinTime() + + if iLength != jLength { + return iLength < jLength + } + if outGroups[i].MinTime() != outGroups[j].MinTime() { + return outGroups[i].MinTime() < outGroups[j].MinTime() + } + + // Guarantee stable sort for tests. + return outGroups[i].Key() < outGroups[j].Key() + }) + + return outGroups, nil +} + +// Get the hash of a group based on the UserID, and the starting and ending time of the group's range. +func hashGroup(userID string, rangeStart int64, rangeEnd int64) uint32 { + groupString := fmt.Sprintf("%v%v%v", userID, rangeStart, rangeEnd) + groupHasher := fnv.New32a() + // Hasher never returns err. + _, _ = groupHasher.Write([]byte(groupString)) + groupHash := groupHasher.Sum32() + + return groupHash +} + +// blocksGroup struct and functions copied and adjusted from https://github.com/cortexproject/cortex/pull/2616 +type blocksGroup struct { + rangeStart int64 // Included. + rangeEnd int64 // Excluded. + blocks []*metadata.Meta + key string +} + +// overlaps returns whether the group range overlaps with the input group. 
+func (g blocksGroup) overlaps(other blocksGroup) bool { + if g.rangeStart >= other.rangeEnd || other.rangeStart >= g.rangeEnd { + return false + } + + return true +} + +func (g blocksGroup) rangeStartTime() time.Time { + return time.Unix(0, g.rangeStart*int64(time.Millisecond)).UTC() +} + +func (g blocksGroup) rangeEndTime() time.Time { + return time.Unix(0, g.rangeEnd*int64(time.Millisecond)).UTC() +} + +func (g blocksGroup) String() string { + out := strings.Builder{} + out.WriteString(fmt.Sprintf("Group range start: %d, range end: %d, key %v, blocks: ", g.rangeStart, g.rangeEnd, g.key)) + + for i, b := range g.blocks { + if i > 0 { + out.WriteString(", ") + } + + minT := time.Unix(0, b.MinTime*int64(time.Millisecond)).UTC() + maxT := time.Unix(0, b.MaxTime*int64(time.Millisecond)).UTC() + out.WriteString(fmt.Sprintf("%s (min time: %s, max time: %s)", b.ULID.String(), minT.String(), maxT.String())) + } + + return out.String() +} + +func (g blocksGroup) rangeLength() int64 { + return g.rangeEnd - g.rangeStart +} + +// minTime returns the MinTime across all blocks in the group. +func (g blocksGroup) minTime() int64 { + // Blocks are expected to be sorted by MinTime. + return g.blocks[0].MinTime +} + +// maxTime returns the MaxTime across all blocks in the group. +func (g blocksGroup) maxTime() int64 { + max := g.blocks[0].MaxTime + + for _, b := range g.blocks[1:] { + if b.MaxTime > max { + max = b.MaxTime + } + } + + return max +} + +// groupBlocksByCompactableRanges groups input blocks by compactable ranges, giving preference +// to smaller ranges. If a smaller range contains more than 1 block (and thus it should +// be compacted), the larger range block group is not generated until each of its +// smaller ranges have 1 block each at most. +func groupBlocksByCompactableRanges(blocks []*metadata.Meta, ranges []int64) []blocksGroup { + if len(blocks) == 0 { + return nil + } + + // Sort blocks by min time. + sortMetasByMinTime(blocks) + + var groups []blocksGroup + + for _, tr := range ranges { + nextGroup: + for _, group := range groupBlocksByRange(blocks, tr) { + + // Exclude groups with a single block, because no compaction is required. + if len(group.blocks) < 2 { + continue + } + + // Ensure this group's range does not overlap with any group already scheduled + // for compaction by a smaller range, because we need to guarantee that smaller ranges + // are compacted first. + for _, c := range groups { + if group.overlaps(c) { + continue nextGroup + } + } + + groups = append(groups, group) + } + } + + // Ensure we don't compact the most recent blocks prematurely when another one of + // the same size still fits in the range. To do it, we consider valid a group only + // if it's before the most recent block or if it fully covers the range. + highestMinTime := blocks[len(blocks)-1].MinTime + + for idx := 0; idx < len(groups); { + group := groups[idx] + + // If the group covers a range before the most recent block, it's fine. + if group.rangeEnd <= highestMinTime { + idx++ + continue + } + + // If the group covers the full range, it's fine. + if group.maxTime()-group.minTime() == group.rangeLength() { + idx++ + continue + } + + // We hit into a group which would compact recent blocks prematurely, + // so we need to filter it out. + + groups = append(groups[:idx], groups[idx+1:]...) + } + + return groups +} + +// groupBlocksByRange splits the blocks by the time range. The range sequence starts at 0. +// Input blocks are expected to be sorted by MinTime. 
+// +// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30 +// it returns [0-10, 10-20], [50-60], [90-100]. +func groupBlocksByRange(blocks []*metadata.Meta, tr int64) []blocksGroup { + var ret []blocksGroup + + for i := 0; i < len(blocks); { + var ( + group blocksGroup + m = blocks[i] + ) + + group.rangeStart = getRangeStart(m, tr) + group.rangeEnd = group.rangeStart + tr + + // Skip blocks that don't fall into the range. This can happen via mis-alignment or + // by being the multiple of the intended range. + if m.MaxTime > group.rangeEnd { + i++ + continue + } + + // Add all blocks to the current group that are within [t0, t0+tr]. + for ; i < len(blocks); i++ { + // If the block does not start within this group, then we should break the iteration + // and move it to the next group. + if blocks[i].MinTime >= group.rangeEnd { + break + } + + // If the block doesn't fall into this group, but it started within this group then it + // means it spans across multiple ranges and we should skip it. + if blocks[i].MaxTime > group.rangeEnd { + continue + } + + group.blocks = append(group.blocks, blocks[i]) + } + + if len(group.blocks) > 0 { + ret = append(ret, group) + } + } + + return ret +} + +func getRangeStart(m *metadata.Meta, tr int64) int64 { + // Compute start of aligned time range of size tr closest to the current block's start. + // This code has been copied from TSDB. + if m.MinTime >= 0 { + return tr * (m.MinTime / tr) + } + + return tr * ((m.MinTime - tr + 1) / tr) +} + +func sortMetasByMinTime(metas []*metadata.Meta) { + sort.Slice(metas, func(i, j int) bool { + return metas[i].BlockMeta.MinTime < metas[j].BlockMeta.MinTime + }) +} diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go new file mode 100644 index 00000000000..5f6429dbb78 --- /dev/null +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -0,0 +1,496 @@ +package compactor + +import ( + "testing" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/block/metadata" +) + +func TestTimeShardingGrouper_Groups(t *testing.T) { + block1ulid := ulid.MustNew(1, nil) + block2ulid := ulid.MustNew(2, nil) + block3ulid := ulid.MustNew(3, nil) + block4ulid := ulid.MustNew(4, nil) + block5ulid := ulid.MustNew(5, nil) + block6ulid := ulid.MustNew(6, nil) + block7ulid := ulid.MustNew(7, nil) + block8ulid := ulid.MustNew(8, nil) + block9ulid := ulid.MustNew(9, nil) + block10ulid := ulid.MustNew(10, nil) + block11ulid := ulid.MustNew(11, nil) + block12ulid := ulid.MustNew(12, nil) + block13ulid := ulid.MustNew(13, nil) + block14ulid := ulid.MustNew(14, nil) + block15ulid := ulid.MustNew(15, nil) + + blocks := + map[ulid.ULID]*metadata.Meta{ + block1ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block1ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 2 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block2ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block2ulid, MinTime: 3 * time.Hour.Milliseconds(), MaxTime: 4 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block3ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block3ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 1 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: 
map[string]string{"__org_id__": "1"}}, + }, + block4ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block4ulid, MinTime: 2 * time.Hour.Milliseconds(), MaxTime: 3 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block5ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block5ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 2 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "2"}}, + }, + block6ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block6ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 1 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "2"}}, + }, + block7ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block7ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 1 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block8ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block8ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 1 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "2"}}, + }, + block9ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block9ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 1 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "3"}}, + }, + block10ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block10ulid, MinTime: 4 * time.Hour.Milliseconds(), MaxTime: 6 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "2"}}, + }, + block11ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block11ulid, MinTime: 6 * time.Hour.Milliseconds(), MaxTime: 8 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "2"}}, + }, + block12ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block12ulid, MinTime: 1 * time.Hour.Milliseconds(), MaxTime: 2 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block13ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block13ulid, MinTime: 0 * time.Hour.Milliseconds(), MaxTime: 20 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block14ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block14ulid, MinTime: 21 * time.Hour.Milliseconds(), MaxTime: 40 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + block15ulid: { + BlockMeta: tsdb.BlockMeta{ULID: block15ulid, MinTime: 21 * time.Hour.Milliseconds(), MaxTime: 40 * time.Hour.Milliseconds()}, + Thanos: metadata.Thanos{Labels: map[string]string{"__org_id__": "1"}}, + }, + } + + tests := map[string]struct { + ranges []time.Duration + blocks map[ulid.ULID]*metadata.Meta + expected [][]ulid.ULID + }{ + "test basic grouping": { + ranges: []time.Duration{2 * time.Hour, 4 * time.Hour}, + blocks: map[ulid.ULID]*metadata.Meta{block1ulid: blocks[block1ulid], block2ulid: blocks[block2ulid], block3ulid: blocks[block3ulid], block4ulid: blocks[block4ulid], block5ulid: blocks[block5ulid], block6ulid: blocks[block6ulid]}, + expected: [][]ulid.ULID{ + {block5ulid, block6ulid}, + {block1ulid, block3ulid}, + {block2ulid, block4ulid}, + }, + }, + "test no compaction": { + ranges: []time.Duration{2 * time.Hour, 4 * time.Hour}, + blocks: map[ulid.ULID]*metadata.Meta{block7ulid: blocks[block7ulid], block8ulid: blocks[block8ulid], block9ulid: blocks[block9ulid]}, + expected: [][]ulid.ULID{}, + }, + "test smallest range first": { + ranges: 
[]time.Duration{2 * time.Hour, 4 * time.Hour}, + blocks: map[ulid.ULID]*metadata.Meta{block1ulid: blocks[block1ulid], block2ulid: blocks[block2ulid], block3ulid: blocks[block3ulid], block4ulid: blocks[block4ulid], block10ulid: blocks[block10ulid], block11ulid: blocks[block11ulid]}, + expected: [][]ulid.ULID{ + {block1ulid, block3ulid}, + {block2ulid, block4ulid}, + {block10ulid, block11ulid}, + }, + }, + "test oldest min time first": { + ranges: []time.Duration{2 * time.Hour, 4 * time.Hour}, + blocks: map[ulid.ULID]*metadata.Meta{block1ulid: blocks[block1ulid], block2ulid: blocks[block2ulid], block3ulid: blocks[block3ulid], block4ulid: blocks[block4ulid], block12ulid: blocks[block12ulid]}, + expected: [][]ulid.ULID{ + {block1ulid, block3ulid, block12ulid}, + {block2ulid, block4ulid}, + }, + }, + "test overlapping blocks": { + ranges: []time.Duration{20 * time.Hour, 40 * time.Hour}, + blocks: map[ulid.ULID]*metadata.Meta{block13ulid: blocks[block13ulid], block14ulid: blocks[block14ulid], block15ulid: blocks[block15ulid]}, + expected: [][]ulid.ULID{}, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + compactorCfg := &Config{ + BlockRanges: testData.ranges, + } + + g := NewShuffleShardingGrouper(nil, + nil, + false, // Do not accept malformed indexes + true, // Enable vertical compaction + prometheus.NewRegistry(), + nil, + nil, + metadata.NoneFunc, + *compactorCfg) + actual, err := g.Groups(testData.blocks) + require.NoError(t, err) + require.Len(t, actual, len(testData.expected)) + + for idx, expectedIDs := range testData.expected { + assert.Equal(t, expectedIDs, actual[idx].IDs()) + } + }) + } +} + +func TestGroupBlocksByCompactableRanges(t *testing.T) { + tests := map[string]struct { + ranges []int64 + blocks []*metadata.Meta + expected []blocksGroup + }{ + "no input blocks": { + ranges: []int64{20}, + blocks: nil, + expected: nil, + }, + "only 1 block in input": { + ranges: []int64{20}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + }, + expected: nil, + }, + "only 1 block for each range (single range)": { + ranges: []int64{20}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + }, + expected: nil, + }, + "only 1 block for each range (multiple ranges)": { + ranges: []int64{10, 20}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + }, + expected: nil, + }, + "input blocks can be compacted on the 1st range only": { + ranges: []int64{20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 25, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 50}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, + }, + expected: []blocksGroup{ + {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 25, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + }}, + {rangeStart: 40, rangeEnd: 60, blocks: []*metadata.Meta{ + {BlockMeta: 
tsdb.BlockMeta{MinTime: 40, MaxTime: 50}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 60}}, + }}, + }, + }, + "input blocks can be compacted on the 2nd range only": { + ranges: []int64{10, 20}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + }, + expected: []blocksGroup{ + {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + }}, + {rangeStart: 60, rangeEnd: 80, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + }}, + }, + }, + "input blocks can be compacted on a mix of 1st and 2nd ranges, guaranteeing no overlaps and giving preference to smaller ranges": { + ranges: []int64{10, 20}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 60, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 75, MaxTime: 80}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 10, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 10}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 10}}, + }}, + {rangeStart: 70, rangeEnd: 80, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 70, MaxTime: 80}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 75, MaxTime: 80}}, + }}, + {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + }}, + }, + }, + "input blocks have already been compacted with the largest range": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + }, + expected: nil, + }, + "input blocks match the largest range but can be compacted because overlapping": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + }, + expected: []blocksGroup{ + {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + }}, + }, + }, + "a block with time range crossing two 1st level ranges should be NOT considered for 1st level compaction": { + ranges: []int64{20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. 
+ {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + }, + expected: []blocksGroup{ + {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 40}}, + }}, + }, + }, + "a block with time range crossing two 1st level ranges should BE considered for 2nd level compaction": { + ranges: []int64{20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, // This block spans across two 1st level ranges. + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 40}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 30}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 40}}, + }}, + }, + }, + "a block with time range larger then the largest compaction range should NOT be considered for compaction": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 30, MaxTime: 150}}, // This block is larger then the largest compaction range. + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 70}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + }, + expected: []blocksGroup{ + {rangeStart: 80, rangeEnd: 120, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 80, MaxTime: 120}}, + }}, + }, + }, + "a range containing the most recent block shouldn't be prematurely compacted if doesn't cover the full range": { + ranges: []int64{10, 20, 40}, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 5, MaxTime: 8}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 9}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 12}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 13, MaxTime: 15}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 10, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 5, MaxTime: 8}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 7, MaxTime: 9}}, + }}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + assert.Equal(t, testData.expected, groupBlocksByCompactableRanges(testData.blocks, testData.ranges)) + }) + } +} + +func TestGroupBlocksByRange(t *testing.T) { + tests := map[string]struct { + timeRange int64 + blocks []*metadata.Meta + expected []blocksGroup + }{ + "no input blocks": { + timeRange: 20, + blocks: nil, + expected: nil, + }, + "only 1 block in input": { + timeRange: 20, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + }}, + }, + }, + "only 1 block per range": { + timeRange: 20, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 15}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 15}}, + }}, + {rangeStart: 40, rangeEnd: 60, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + }}, + }, + }, + 
"multiple blocks per range": { + timeRange: 20, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 15}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 55}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 15}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + }}, + {rangeStart: 40, rangeEnd: 60, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 40, MaxTime: 60}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 50, MaxTime: 55}}, + }}, + }, + }, + "a block with time range larger then the range should be excluded": { + timeRange: 20, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, // This block is larger then the range. + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 20, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + }}, + {rangeStart: 20, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + }}, + }, + }, + "blocks with different time ranges but all fitting within the input range": { + timeRange: 40, + blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + }, + expected: []blocksGroup{ + {rangeStart: 0, rangeEnd: 40, blocks: []*metadata.Meta{ + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 0, MaxTime: 40}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 10, MaxTime: 20}}, + {BlockMeta: tsdb.BlockMeta{MinTime: 20, MaxTime: 30}}, + }}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + assert.Equal(t, testData.expected, groupBlocksByRange(testData.blocks, testData.timeRange)) + }) + } +} + +func TestBlocksGroup_overlaps(t *testing.T) { + tests := []struct { + first blocksGroup + second blocksGroup + expected bool + }{ + { + first: blocksGroup{rangeStart: 10, rangeEnd: 20}, + second: blocksGroup{rangeStart: 20, rangeEnd: 30}, + expected: false, + }, { + first: blocksGroup{rangeStart: 10, rangeEnd: 20}, + second: blocksGroup{rangeStart: 19, rangeEnd: 30}, + expected: true, + }, { + first: blocksGroup{rangeStart: 10, rangeEnd: 21}, + second: blocksGroup{rangeStart: 20, rangeEnd: 30}, + expected: true, + }, { + first: blocksGroup{rangeStart: 10, rangeEnd: 20}, + second: blocksGroup{rangeStart: 12, rangeEnd: 18}, + expected: true, + }, + } + + for _, tc := range tests { + assert.Equal(t, tc.expected, tc.first.overlaps(tc.second)) + assert.Equal(t, tc.expected, tc.second.overlaps(tc.first)) + } +} diff --git a/pkg/compactor/shuffle_sharding_planner.go b/pkg/compactor/shuffle_sharding_planner.go new file mode 100644 index 00000000000..96e70f59a5f --- /dev/null +++ b/pkg/compactor/shuffle_sharding_planner.go @@ -0,0 +1,39 @@ +package compactor + +import ( + "context" + "fmt" + + "github.com/go-kit/kit/log" + "github.com/thanos-io/thanos/pkg/block/metadata" +) + +type ShuffleShardingPlanner struct { + logger log.Logger + 
ranges []int64 +} + +func NewShuffleShardingPlanner(logger log.Logger, ranges []int64) *ShuffleShardingPlanner { + return &ShuffleShardingPlanner{ + logger: logger, + ranges: ranges, + } +} + +func (p *ShuffleShardingPlanner) Plan(_ context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + // Ensure all blocks fits within the largest range. This is a double check + // to ensure there's no bug in the previous blocks grouping, given this Plan() + // is just a pass-through. + // Modifed from https://github.com/cortexproject/cortex/pull/2616/files#diff-e3051fc530c48bb276ba958dd8fadc684e546bd7964e6bc75cef9a86ef8df344R28-R63 + largestRange := p.ranges[len(p.ranges)-1] + rangeStart := getRangeStart(metasByMinTime[0], largestRange) + rangeEnd := rangeStart + largestRange + + for _, b := range metasByMinTime { + if b.MinTime < rangeStart || b.MaxTime > rangeEnd { + return nil, fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", b.ULID.String(), b.MinTime, b.MaxTime, rangeStart, rangeEnd) + } + } + + return metasByMinTime, nil +} From 366536a67d0efbdd1a82ac4b5b612b3558096073 Mon Sep 17 00:00:00 2001 From: Albert Date: Fri, 9 Jul 2021 19:30:53 -0700 Subject: [PATCH 002/137] update CHANGELOG.md Signed-off-by: Albert Signed-off-by: Alvin Lin --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00ff7a81c24..4eebc8305dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 +* [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 ## 1.10.0-rc.0 / 2021-06-28 From da688789c72709b165f1384fc75d036f14bb7398 Mon Sep 17 00:00:00 2001 From: Albert <32654899+ac1214@users.noreply.github.com> Date: Mon, 5 Jul 2021 08:44:57 -0700 Subject: [PATCH 003/137] Add timeout for waiting on compactor to become ACTIVE in the ring. (#4262) * add MaxRetries to WaitInstanceState Signed-off-by: Albert * update CHANGELOG.md Signed-off-by: Albert * Add timeout for waiting on compactor to become ACTIVE in the ring. 
Signed-off-by: Albert * add MaxRetries variable back to WaitInstanceState Signed-off-by: Albert * Fix linting issues Signed-off-by: Albert * Remove duplicate entry from changelog Signed-off-by: Albert * Address PR comments and set timeout to be configurable Signed-off-by: Albert * Address PR comments and fix tests Signed-off-by: Albert * Update unit tests Signed-off-by: Albert * Update changelog and fix linting Signed-off-by: Albert * Fixed CHANGELOG entry order Signed-off-by: Marco Pracucci Co-authored-by: Albert Co-authored-by: Marco Pracucci Signed-off-by: Alvin Lin --- CHANGELOG.md | 1 + docs/blocks-storage/compactor.md | 4 + docs/configuration/config-file-reference.md | 4 + pkg/compactor/compactor.go | 6 +- pkg/compactor/compactor_ring.go | 9 +- pkg/compactor/compactor_test.go | 33 ++++++ pkg/ring/ring.go | 4 + pkg/ring/util.go | 2 +- pkg/ring/util_test.go | 115 ++++++++++++++++++++ 9 files changed, 175 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4eebc8305dc..5de6d5a38fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## master / unreleased * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 +* [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index 6579ad4c663..6a599ace11b 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -235,4 +235,8 @@ compactor: # Name of network interface to read address from. # CLI flag: -compactor.ring.instance-interface-names [instance_interface_names: | default = [eth0 en0]] + + # Timeout for waiting on compactor to become ACTIVE in the ring. + # CLI flag: -compactor.ring.wait-active-instance-timeout + [wait_active_instance_timeout: | default = 10m] ``` diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 692349cc086..cd1afd76328 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -5199,6 +5199,10 @@ sharding_ring: # Name of network interface to read address from. # CLI flag: -compactor.ring.instance-interface-names [instance_interface_names: | default = [eth0 en0]] + + # Timeout for waiting on compactor to become ACTIVE in the ring. + # CLI flag: -compactor.ring.wait-active-instance-timeout + [wait_active_instance_timeout: | default = 10m] ``` ### `store_gateway_config` diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index f67f0e941ea..30d6b447355 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -434,7 +434,11 @@ func (c *Compactor) starting(ctx context.Context) error { // users scanner depends on the ring (to check whether an user belongs // to this shard or not). 
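The hunk that follows makes this change inside `Compactor.starting()`. As a condensed, standalone sketch of the same pattern (the package name and helper function here are illustrative only; the ring call and config field are the ones added by this patch):

```go
package compactorsketch

import (
	"context"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
)

// waitUntilActive condenses the pattern introduced here: the wait for the
// compactor to become ACTIVE in the ring is bounded by the configurable
// -compactor.ring.wait-active-instance-timeout (default 10m) instead of
// blocking for as long as the parent context lives.
func waitUntilActive(ctx context.Context, r ring.ReadRing, instanceID string, timeout time.Duration) error {
	ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// WaitInstanceState polls the ring and returns context.DeadlineExceeded
	// if the instance never reaches ACTIVE within the timeout.
	return ring.WaitInstanceState(ctxWithTimeout, r, instanceID, ring.ACTIVE)
}
```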
level.Info(c.logger).Log("msg", "waiting until compactor is ACTIVE in the ring") - if err := ring.WaitInstanceState(ctx, c.ring, c.ringLifecycler.ID, ring.ACTIVE); err != nil { + + ctxWithTimeout, cancel := context.WithTimeout(ctx, c.compactorCfg.ShardingRing.WaitActiveInstanceTimeout) + defer cancel() + if err := ring.WaitInstanceState(ctxWithTimeout, c.ring, c.ringLifecycler.ID, ring.ACTIVE); err != nil { + level.Error(c.logger).Log("msg", "compactor failed to become ACTIVE in the ring", "err", err) return err } level.Info(c.logger).Log("msg", "compactor is ACTIVE in the ring") diff --git a/pkg/compactor/compactor_ring.go b/pkg/compactor/compactor_ring.go index 74c3a77ad3b..5b55c8a8719 100644 --- a/pkg/compactor/compactor_ring.go +++ b/pkg/compactor/compactor_ring.go @@ -34,6 +34,10 @@ type RingConfig struct { // Injected internally ListenPort int `yaml:"-"` + + WaitActiveInstanceTimeout time.Duration `yaml:"wait_active_instance_timeout"` + + ObservePeriod time.Duration `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -59,6 +63,9 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.InstanceAddr, "compactor.ring.instance-addr", "", "IP address to advertise in the ring.") f.IntVar(&cfg.InstancePort, "compactor.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") f.StringVar(&cfg.InstanceID, "compactor.ring.instance-id", hostname, "Instance ID to register in the ring.") + + // Timeout durations + f.DurationVar(&cfg.WaitActiveInstanceTimeout, "compactor.ring.wait-active-instance-timeout", 10*time.Minute, "Timeout for waiting on compactor to become ACTIVE in the ring.") } // ToLifecyclerConfig returns a LifecyclerConfig based on the compactor @@ -87,7 +94,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { lc.InfNames = cfg.InstanceInterfaceNames lc.UnregisterOnShutdown = true lc.HeartbeatPeriod = cfg.HeartbeatPeriod - lc.ObservePeriod = 0 + lc.ObservePeriod = cfg.ObservePeriod lc.JoinAfter = 0 lc.MinReadyDuration = 0 lc.FinalSleep = 0 diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 9c14cb09247..da1a83cead3 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1092,6 +1092,9 @@ func prepareConfig() Config { compactorCfg.ShardingRing.WaitStabilityMinDuration = 0 compactorCfg.ShardingRing.WaitStabilityMaxDuration = 0 + // Set lower timeout for waiting on compactor to become ACTIVE in the ring for unit tests + compactorCfg.ShardingRing.WaitActiveInstanceTimeout = 5 * time.Second + return compactorCfg } @@ -1279,3 +1282,33 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { require.NotEqual(t, numUsers, c1Users) require.Equal(t, numUsers, c1Users+c2Users) } + +func TestCompactor_ShouldFailCompactionOnTimeout(t *testing.T) { + t.Parallel() + + // Mock the bucket + bucketClient := &bucket.ClientMock{} + bucketClient.MockIter("", []string{}, nil) + + cfg := prepareConfig() + cfg.ShardingEnabled = true + cfg.ShardingRing.InstanceID = "compactor-1" + cfg.ShardingRing.InstanceAddr = "1.2.3.4" + cfg.ShardingRing.KVStore.Mock = consul.NewInMemoryClient(ring.GetCodec()) + + // Set ObservePeriod to longer than the timeout period to mock a timeout while waiting on ring to become ACTIVE + cfg.ShardingRing.ObservePeriod = time.Second * 10 + + c, _, _, logs, _ := prepare(t, cfg, bucketClient) + + // Try to start the compactor with a bad consul kv-store. 
The + err := services.StartAndAwaitRunning(context.Background(), c) + + // Assert that the compactor timed out + assert.Equal(t, context.DeadlineExceeded, err) + + assert.ElementsMatch(t, []string{ + `level=info component=compactor msg="waiting until compactor is ACTIVE in the ring"`, + `level=error component=compactor msg="compactor failed to become ACTIVE in the ring" err="context deadline exceeded"`, + }, removeIgnoredLogs(strings.Split(strings.TrimSpace(logs.String()), "\n"))) +} diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 539ec65e75e..8d5bd0c925a 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -71,6 +71,10 @@ type ReadRing interface { // and size (number of instances). ShuffleShard(identifier string, size int) ReadRing + // GetInstanceState returns the current state of an instance or an error if the + // instance does not exist in the ring. + GetInstanceState(instanceID string) (InstanceState, error) + // ShuffleShardWithLookback is like ShuffleShard() but the returned subring includes // all instances that have been part of the identifier's shard since "now - lookbackPeriod". ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing diff --git a/pkg/ring/util.go b/pkg/ring/util.go index ac5c27388c9..06e053dd269 100644 --- a/pkg/ring/util.go +++ b/pkg/ring/util.go @@ -69,7 +69,7 @@ func GetInstancePort(configPort, listenPort int) int { // WaitInstanceState waits until the input instanceID is registered within the // ring matching the provided state. A timeout should be provided within the context. -func WaitInstanceState(ctx context.Context, r *Ring, instanceID string, state InstanceState) error { +func WaitInstanceState(ctx context.Context, r ReadRing, instanceID string, state InstanceState) error { backoff := util.NewBackoff(ctx, util.BackoffConfig{ MinBackoff: 100 * time.Millisecond, MaxBackoff: time.Second, diff --git a/pkg/ring/util_test.go b/pkg/ring/util_test.go index c3a3e037e9e..9a4a69c6904 100644 --- a/pkg/ring/util_test.go +++ b/pkg/ring/util_test.go @@ -6,10 +6,65 @@ import ( "testing" "time" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) +type RingMock struct { + mock.Mock +} + +func (r *RingMock) Collect(ch chan<- prometheus.Metric) {} + +func (r *RingMock) Describe(ch chan<- *prometheus.Desc) {} + +func (r *RingMock) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) { + args := r.Called(key, op, bufDescs, bufHosts, bufZones) + return args.Get(0).(ReplicationSet), args.Error(1) +} + +func (r *RingMock) GetAllHealthy(op Operation) (ReplicationSet, error) { + args := r.Called(op) + return args.Get(0).(ReplicationSet), args.Error(1) +} + +func (r *RingMock) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) { + args := r.Called(op) + return args.Get(0).(ReplicationSet), args.Error(1) +} + +func (r *RingMock) ReplicationFactor() int { + return 0 +} + +func (r *RingMock) InstancesCount() int { + return 0 +} + +func (r *RingMock) ShuffleShard(identifier string, size int) ReadRing { + args := r.Called(identifier, size) + return args.Get(0).(ReadRing) +} + +func (r *RingMock) GetInstanceState(instanceID string) (InstanceState, error) { + args := r.Called(instanceID) + return args.Get(0).(InstanceState), args.Error(1) +} + +func (r *RingMock) ShuffleShardWithLookback(identifier string, size int, 
lookbackPeriod time.Duration, now time.Time) ReadRing { + args := r.Called(identifier, size, lookbackPeriod, now) + return args.Get(0).(ReadRing) +} + +func (r *RingMock) HasInstance(instanceID string) bool { + return true +} + +func (r *RingMock) CleanupShuffleShardCache(identifier string) {} + func TestGenerateTokens(t *testing.T) { tokens := GenerateTokens(1000000, nil) @@ -184,3 +239,63 @@ func TestWaitRingStabilityShouldReturnErrorIfMaxWaitingIsReached(t *testing.T) { assert.InDelta(t, maxWaiting, elapsedTime, float64(2*time.Second)) } + +func TestWaitInstanceStateTimeout(t *testing.T) { + t.Parallel() + + const ( + instanceID = "test" + timeoutDuration = time.Second + ) + + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) + defer cancel() + + ring := &RingMock{} + ring.On("GetInstanceState", mock.Anything, mock.Anything).Return(ACTIVE, nil) + + err := WaitInstanceState(ctx, ring, instanceID, PENDING) + + assert.Equal(t, context.DeadlineExceeded, err) + ring.AssertCalled(t, "GetInstanceState", instanceID) +} + +func TestWaitInstanceStateTimeoutOnError(t *testing.T) { + t.Parallel() + + const ( + instanceID = "test" + timeoutDuration = time.Second + ) + + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) + defer cancel() + + ring := &RingMock{} + ring.On("GetInstanceState", mock.Anything, mock.Anything).Return(PENDING, errors.New("instance not found in the ring")) + + err := WaitInstanceState(ctx, ring, instanceID, ACTIVE) + + assert.Equal(t, context.DeadlineExceeded, err) + ring.AssertCalled(t, "GetInstanceState", instanceID) +} + +func TestWaitInstanceStateExitsAfterActualStateEqualsState(t *testing.T) { + t.Parallel() + + const ( + instanceID = "test" + timeoutDuration = time.Second + ) + + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) + defer cancel() + + ring := &RingMock{} + ring.On("GetInstanceState", mock.Anything, mock.Anything).Return(ACTIVE, nil) + + err := WaitInstanceState(ctx, ring, instanceID, ACTIVE) + + assert.Nil(t, err) + ring.AssertNumberOfCalls(t, "GetInstanceState", 1) +} From 18e05badaffb79c14ef22723d8d80b3e8fdce65f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 6 Jul 2021 14:26:29 +0100 Subject: [PATCH 004/137] MergeIterator: allocate less memory at first (#4341) * MergeIterator: allocate less memory at first We were allocating 24x the number of streams of batches, where each batch holds up to 12 samples. By allowing `c.batches` to reallocate when needed, we avoid the need to pre-allocate enough memory for all possible scenarios. * chunk_test: fix innacurate end time on chunks The `through` time is supposed to be the last time in the chunk, and having it one step higher was throwing off other tests and benchmarks. * MergeIterator benchmark: add more realistic sizes At 15-second scrape intervals a chunk covers 30 minutes, so 1,000 chunks is about three weeks, a highly un-representative test. Instant queries, such as those done by the ruler, will only fetch one chunk from each ingester. 
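To put rough numbers on the commit message above: the previous code pre-reserved 2 × BatchSize batch slots per stream (24 per stream, given the 12-sample batches mentioned above), while the new code reserves one slot per stream and lets the slice grow on demand. An illustrative calculation only — the batch size of 12 is taken from the commit message, and the stream counts are made up:

```go
package main

import "fmt"

func main() {
	const batchSize = 12 // "each batch holds up to 12 samples" per the commit message

	for _, streams := range []int{1, 100, 1000} { // e.g. one batch stream per chunk iterator
		oldCap := streams * 2 * batchSize // previous up-front reservation (the "24x the number of streams")
		newCap := streams                 // new up-front reservation; append() grows it only when needed
		fmt.Printf("streams=%d old=%d new=%d\n", streams, oldCap, newCap)
	}
}
```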
Signed-off-by: Bryan Boreham Signed-off-by: Alvin Lin --- CHANGELOG.md | 1 + pkg/querier/batch/batch_test.go | 4 ++++ pkg/querier/batch/chunk_test.go | 1 + pkg/querier/batch/merge.go | 7 +++---- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5de6d5a38fb..51d62a6efd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262 +* [ENHANCEMENT] Reduce memory used by streaming queries, particularly in ruler. #4341 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index baacbcc7192..9b9ffec16e6 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -27,6 +27,10 @@ func BenchmarkNewChunkMergeIterator_CreateAndIterate(b *testing.B) { {numChunks: 1000, numSamplesPerChunk: 100, duplicationFactor: 3, enc: promchunk.DoubleDelta}, {numChunks: 1000, numSamplesPerChunk: 100, duplicationFactor: 1, enc: promchunk.PrometheusXorChunk}, {numChunks: 1000, numSamplesPerChunk: 100, duplicationFactor: 3, enc: promchunk.PrometheusXorChunk}, + {numChunks: 100, numSamplesPerChunk: 100, duplicationFactor: 1, enc: promchunk.PrometheusXorChunk}, + {numChunks: 100, numSamplesPerChunk: 100, duplicationFactor: 3, enc: promchunk.PrometheusXorChunk}, + {numChunks: 1, numSamplesPerChunk: 100, duplicationFactor: 1, enc: promchunk.PrometheusXorChunk}, + {numChunks: 1, numSamplesPerChunk: 100, duplicationFactor: 3, enc: promchunk.PrometheusXorChunk}, } for _, scenario := range scenarios { diff --git a/pkg/querier/batch/chunk_test.go b/pkg/querier/batch/chunk_test.go index 40a3feb4b28..e672811827c 100644 --- a/pkg/querier/batch/chunk_test.go +++ b/pkg/querier/batch/chunk_test.go @@ -59,6 +59,7 @@ func mkChunk(t require.TestingT, from model.Time, points int, enc promchunk.Enco require.Nil(t, npc) ts = ts.Add(step) } + ts = ts.Add(-step) // undo the add that we did just before exiting the loop return chunk.NewChunk(userID, fp, metric, pc, from, ts) } diff --git a/pkg/querier/batch/merge.go b/pkg/querier/batch/merge.go index 88220bd7273..7764b37467b 100644 --- a/pkg/querier/batch/merge.go +++ b/pkg/querier/batch/merge.go @@ -31,8 +31,8 @@ func newMergeIterator(cs []GenericChunk) *mergeIterator { c := &mergeIterator{ its: its, h: make(iteratorHeap, 0, len(its)), - batches: make(batchStream, 0, len(its)*2*promchunk.BatchSize), - batchesBuf: make(batchStream, len(its)*2*promchunk.BatchSize), + batches: make(batchStream, 0, len(its)), + batchesBuf: make(batchStream, len(its)), } for _, iter := range c.its { @@ -112,8 +112,7 @@ func (c *mergeIterator) buildNextBatch(size int) bool { for len(c.h) > 0 && (len(c.batches) == 0 || c.nextBatchEndTime() >= c.h[0].AtTime()) { c.nextBatchBuf[0] = c.h[0].Batch() c.batchesBuf = mergeStreams(c.batches, c.nextBatchBuf[:], c.batchesBuf, size) - copy(c.batches[:len(c.batchesBuf)], c.batchesBuf) - c.batches = 
c.batches[:len(c.batchesBuf)]
+		c.batches = append(c.batches[:0], c.batchesBuf...)
 
 		if c.h[0].Next(size) {
 			heap.Fix(&c.h, 0)

From abcb297adabf66d9d75750988dbea3e661081c3f Mon Sep 17 00:00:00 2001
From: Steve Simpson
Date: Wed, 7 Jul 2021 11:10:35 +0200
Subject: [PATCH 005/137] Expose default configuration values for memberlist. (#4276)

* Expose default configuration values for memberlist.

Set the defaults for various memberlist configuration values based on the "Default LAN" configuration. The only result of this change is that the defaults are now visible and are in the documentation. This also means that if the default values change, then the changes are visible in the documentation, whereas before they would have gone unnoticed.

To prevent this being a breaking change, the existing behaviour is retained, in case anyone is explicitly setting the values to zero and expecting the default to be used.

Signed-off-by: Steve Simpson

* Remove use of zero value as default value indicator.

Signed-off-by: Steve Simpson

* Review comments.

Signed-off-by: Steve Simpson

* Review comments.

Signed-off-by: Steve Simpson
Signed-off-by: Alvin Lin
---
 CHANGELOG.md                                |   8 +
 docs/configuration/config-file-reference.md |  25 ++-
 pkg/cortex/cortex.go                        |   2 +-
 .../kv/memberlist/kv_init_service_test.go   |   3 +
 pkg/ring/kv/memberlist/memberlist_client.go |  57 ++++---
 .../kv/memberlist/memberlist_client_test.go | 146 +++++++++---------
 6 files changed, 122 insertions(+), 119 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 51d62a6efd1..732b329900f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,14 @@
 * [CHANGE] Change default value of `-server.grpc.keepalive.min-time-between-pings` from `5m` to `10s` and `-server.grpc.keepalive.ping-without-stream-allowed` to `true`. #4168
 * [CHANGE] Ingester: Change default value of `-ingester.active-series-metrics-enabled` to `true`. This incurs a small increase in memory usage, between 1.2% and 1.6% as measured on ingesters with 1.3M active series. #4257
 * [CHANGE] Dependency: update go-redis from v8.2.3 to v8.9.0. #4236
+* [CHANGE] Memberlist: Expose default configuration values to the command line options. Note that setting these explicitly to zero will no longer cause the default to be used. If the default is desired, then do not set the option. The following are affected: #4276
+  - `-memberlist.stream-timeout`
+  - `-memberlist.retransmit-factor`
+  - `-memberlist.pull-push-interval`
+  - `-memberlist.gossip-interval`
+  - `-memberlist.gossip-nodes`
+  - `-memberlist.gossip-to-dead-nodes-time`
+  - `-memberlist.dead-node-reclaim-time`
 * [FEATURE] Querier: Added new `-querier.max-fetched-series-per-query` flag. When Cortex is running with blocks storage, the max series per query limit is enforced in the querier and applies to unique series received from ingesters and store-gateway (long-term storage). #4179
 * [FEATURE] Querier/Ruler: Added new `-querier.max-fetched-chunk-bytes-per-query` flag. When Cortex is running with blocks storage, the max chunk bytes limit is enforced in the querier and ruler and limits the size of all aggregated chunks returned from ingesters and storage as bytes for a query. #4216
 * [FEATURE] Alertmanager: support negative matchers, time-based muting - [upstream release notes](https://github.com/prometheus/alertmanager/releases/tag/v0.22.0).
#4237 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index cd1afd76328..cc6e8dd7449 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -3761,33 +3761,32 @@ The `memberlist_config` configures the Gossip memberlist. [randomize_node_name: | default = true] # The timeout for establishing a connection with a remote node, and for -# read/write operations. Uses memberlist LAN defaults if 0. +# read/write operations. # CLI flag: -memberlist.stream-timeout -[stream_timeout: | default = 0s] +[stream_timeout: | default = 10s] # Multiplication factor used when sending out messages (factor * log(N+1)). # CLI flag: -memberlist.retransmit-factor -[retransmit_factor: | default = 0] +[retransmit_factor: | default = 4] -# How often to use pull/push sync. Uses memberlist LAN defaults if 0. +# How often to use pull/push sync. # CLI flag: -memberlist.pullpush-interval -[pull_push_interval: | default = 0s] +[pull_push_interval: | default = 30s] -# How often to gossip. Uses memberlist LAN defaults if 0. +# How often to gossip. # CLI flag: -memberlist.gossip-interval -[gossip_interval: | default = 0s] +[gossip_interval: | default = 200ms] -# How many nodes to gossip to. Uses memberlist LAN defaults if 0. +# How many nodes to gossip to. # CLI flag: -memberlist.gossip-nodes -[gossip_nodes: | default = 0] +[gossip_nodes: | default = 3] # How long to keep gossiping to dead nodes, to give them chance to refute their -# death. Uses memberlist LAN defaults if 0. +# death. # CLI flag: -memberlist.gossip-to-dead-nodes-time -[gossip_to_dead_nodes_time: | default = 0s] +[gossip_to_dead_nodes_time: | default = 30s] -# How soon can dead node's name be reclaimed with new address. Defaults to 0, -# which is disabled. +# How soon can dead node's name be reclaimed with new address. 0 to disable. # CLI flag: -memberlist.dead-node-reclaim-time [dead_node_reclaim_time: | default = 0s] diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 3f198611117..76fb18e0f0a 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -170,7 +170,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Alertmanager.RegisterFlags(f) c.AlertmanagerStorage.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) - c.MemberlistKV.RegisterFlags(f, "") + c.MemberlistKV.RegisterFlags(f) c.QueryScheduler.RegisterFlags(f) // These don't seem to have a home. diff --git a/pkg/ring/kv/memberlist/kv_init_service_test.go b/pkg/ring/kv/memberlist/kv_init_service_test.go index a6e7001f0da..221da9a66f3 100644 --- a/pkg/ring/kv/memberlist/kv_init_service_test.go +++ b/pkg/ring/kv/memberlist/kv_init_service_test.go @@ -7,6 +7,8 @@ import ( "github.com/hashicorp/memberlist" "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/util/flagext" ) func TestPage(t *testing.T) { @@ -53,6 +55,7 @@ func TestPage(t *testing.T) { func TestStop(t *testing.T) { var cfg KVConfig + flagext.DefaultValues(&cfg) kvinit := NewKVInitService(&cfg, nil) require.NoError(t, kvinit.stopping(nil)) } diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index e2d9ede504d..c0f962993b5 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -165,12 +165,14 @@ type KVConfig struct { } // RegisterFlags registers flags. 
-func (cfg *KVConfig) RegisterFlags(f *flag.FlagSet, prefix string) { +func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + mlDefaults := defaultMemberlistConfig() + // "Defaults to hostname" -- memberlist sets it to hostname by default. f.StringVar(&cfg.NodeName, prefix+"memberlist.nodename", "", "Name of the node in memberlist cluster. Defaults to hostname.") // memberlist.DefaultLANConfig will put hostname here. f.BoolVar(&cfg.RandomizeNodeName, prefix+"memberlist.randomize-node-name", true, "Add random suffix to the node name.") - f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", 0, "The timeout for establishing a connection with a remote node, and for read/write operations. Uses memberlist LAN defaults if 0.") - f.IntVar(&cfg.RetransmitMult, prefix+"memberlist.retransmit-factor", 0, "Multiplication factor used when sending out messages (factor * log(N+1)).") + f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", mlDefaults.TCPTimeout, "The timeout for establishing a connection with a remote node, and for read/write operations.") + f.IntVar(&cfg.RetransmitMult, prefix+"memberlist.retransmit-factor", mlDefaults.RetransmitMult, "Multiplication factor used when sending out messages (factor * log(N+1)).") f.Var(&cfg.JoinMembers, prefix+"memberlist.join", "Other cluster members to join. Can be specified multiple times. It can be an IP, hostname or an entry specified in the DNS Service Discovery format (see https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery for more details).") f.DurationVar(&cfg.MinJoinBackoff, prefix+"memberlist.min-join-backoff", 1*time.Second, "Min backoff duration to join other cluster members.") f.DurationVar(&cfg.MaxJoinBackoff, prefix+"memberlist.max-join-backoff", 1*time.Minute, "Max backoff duration to join other cluster members.") @@ -179,16 +181,20 @@ func (cfg *KVConfig) RegisterFlags(f *flag.FlagSet, prefix string) { f.DurationVar(&cfg.RejoinInterval, prefix+"memberlist.rejoin-interval", 0, "If not 0, how often to rejoin the cluster. Occasional rejoin can help to fix the cluster split issue, and is harmless otherwise. For example when using only few components as a seed nodes (via -memberlist.join), then it's recommended to use rejoin. If -memberlist.join points to dynamic service that resolves to all gossiping nodes (eg. Kubernetes headless service), then rejoin is not needed.") f.DurationVar(&cfg.LeftIngestersTimeout, prefix+"memberlist.left-ingesters-timeout", 5*time.Minute, "How long to keep LEFT ingesters in the ring.") f.DurationVar(&cfg.LeaveTimeout, prefix+"memberlist.leave-timeout", 5*time.Second, "Timeout for leaving memberlist cluster.") - f.DurationVar(&cfg.GossipInterval, prefix+"memberlist.gossip-interval", 0, "How often to gossip. Uses memberlist LAN defaults if 0.") - f.IntVar(&cfg.GossipNodes, prefix+"memberlist.gossip-nodes", 0, "How many nodes to gossip to. Uses memberlist LAN defaults if 0.") - f.DurationVar(&cfg.PushPullInterval, prefix+"memberlist.pullpush-interval", 0, "How often to use pull/push sync. Uses memberlist LAN defaults if 0.") - f.DurationVar(&cfg.GossipToTheDeadTime, prefix+"memberlist.gossip-to-dead-nodes-time", 0, "How long to keep gossiping to dead nodes, to give them chance to refute their death. Uses memberlist LAN defaults if 0.") - f.DurationVar(&cfg.DeadNodeReclaimTime, prefix+"memberlist.dead-node-reclaim-time", 0, "How soon can dead node's name be reclaimed with new address. 
Defaults to 0, which is disabled.") + f.DurationVar(&cfg.GossipInterval, prefix+"memberlist.gossip-interval", mlDefaults.GossipInterval, "How often to gossip.") + f.IntVar(&cfg.GossipNodes, prefix+"memberlist.gossip-nodes", mlDefaults.GossipNodes, "How many nodes to gossip to.") + f.DurationVar(&cfg.PushPullInterval, prefix+"memberlist.pullpush-interval", mlDefaults.PushPullInterval, "How often to use pull/push sync.") + f.DurationVar(&cfg.GossipToTheDeadTime, prefix+"memberlist.gossip-to-dead-nodes-time", mlDefaults.GossipToTheDeadTime, "How long to keep gossiping to dead nodes, to give them chance to refute their death.") + f.DurationVar(&cfg.DeadNodeReclaimTime, prefix+"memberlist.dead-node-reclaim-time", mlDefaults.DeadNodeReclaimTime, "How soon can dead node's name be reclaimed with new address. 0 to disable.") f.IntVar(&cfg.MessageHistoryBufferBytes, prefix+"memberlist.message-history-buffer-bytes", 0, "How much space to use for keeping received and sent messages in memory for troubleshooting (two buffers). 0 to disable.") cfg.TCPTransport.RegisterFlags(f, prefix) } +func (cfg *KVConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix(f, "") +} + func generateRandomSuffix() string { suffix := make([]byte, 4) _, err := rand.Read(suffix) @@ -345,36 +351,27 @@ func NewKV(cfg KVConfig, logger log.Logger) *KV { return mlkv } +func defaultMemberlistConfig() *memberlist.Config { + return memberlist.DefaultLANConfig() +} + func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { tr, err := NewTCPTransport(m.cfg.TCPTransport, m.logger) if err != nil { return nil, fmt.Errorf("failed to create transport: %v", err) } - mlCfg := memberlist.DefaultLANConfig() + mlCfg := defaultMemberlistConfig() mlCfg.Delegate = m - if m.cfg.StreamTimeout != 0 { - mlCfg.TCPTimeout = m.cfg.StreamTimeout - } - if m.cfg.RetransmitMult != 0 { - mlCfg.RetransmitMult = m.cfg.RetransmitMult - } - if m.cfg.PushPullInterval != 0 { - mlCfg.PushPullInterval = m.cfg.PushPullInterval - } - if m.cfg.GossipInterval != 0 { - mlCfg.GossipInterval = m.cfg.GossipInterval - } - if m.cfg.GossipNodes != 0 { - mlCfg.GossipNodes = m.cfg.GossipNodes - } - if m.cfg.GossipToTheDeadTime > 0 { - mlCfg.GossipToTheDeadTime = m.cfg.GossipToTheDeadTime - } - if m.cfg.DeadNodeReclaimTime > 0 { - mlCfg.DeadNodeReclaimTime = m.cfg.DeadNodeReclaimTime - } + mlCfg.TCPTimeout = m.cfg.StreamTimeout + mlCfg.RetransmitMult = m.cfg.RetransmitMult + mlCfg.PushPullInterval = m.cfg.PushPullInterval + mlCfg.GossipInterval = m.cfg.GossipInterval + mlCfg.GossipNodes = m.cfg.GossipNodes + mlCfg.GossipToTheDeadTime = m.cfg.GossipToTheDeadTime + mlCfg.DeadNodeReclaimTime = m.cfg.DeadNodeReclaimTime + if m.cfg.NodeName != "" { mlCfg.Name = m.cfg.NodeName } diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index 22e598197f1..de337571da3 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/ring/kv/codec" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" ) @@ -207,12 +208,12 @@ func TestBasicGetAndCas(t *testing.T) { c := dataCodec{} name := "Ing 1" - cfg := KVConfig{ - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - }, - Codecs: []codec.Codec{c}, + var cfg KVConfig + flagext.DefaultValues(&cfg) 
+ cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, } + cfg.Codecs = []codec.Codec{c} mkv := NewKV(cfg, log.NewNopLogger()) require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv)) @@ -265,10 +266,10 @@ func withFixtures(t *testing.T, testFN func(t *testing.T, kv *Client)) { c := dataCodec{} - cfg := KVConfig{ - TCPTransport: TCPTransportConfig{}, - Codecs: []codec.Codec{c}, - } + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.TCPTransport = TCPTransportConfig{} + cfg.Codecs = []codec.Codec{c} mkv := NewKV(cfg, log.NewNopLogger()) require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv)) @@ -410,9 +411,9 @@ func TestCASFailedBecauseOfVersionChanges(t *testing.T) { func TestMultipleCAS(t *testing.T) { c := dataCodec{} - cfg := KVConfig{ - Codecs: []codec.Codec{c}, - } + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.Codecs = []codec.Codec{c} mkv := NewKV(cfg, log.NewNopLogger()) mkv.maxCasRetries = 20 @@ -501,26 +502,21 @@ func TestMultipleClients(t *testing.T) { for i := 0; i < members; i++ { id := fmt.Sprintf("Member-%d", i) - cfg := KVConfig{ - NodeName: id, - - // some useful parameters when playing with higher number of members - // RetransmitMult: 2, - GossipInterval: 100 * time.Millisecond, - GossipNodes: 3, - PushPullInterval: 5 * time.Second, - // PacketDialTimeout: 5 * time.Second, - // StreamTimeout: 5 * time.Second, - // PacketWriteTimeout: 2 * time.Second, - - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: 0, // randomize ports - }, - - Codecs: []codec.Codec{c}, + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.NodeName = id + + cfg.GossipInterval = 100 * time.Millisecond + cfg.GossipNodes = 3 + cfg.PushPullInterval = 5 * time.Second + + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: 0, // randomize ports } + cfg.Codecs = []codec.Codec{c} + mkv := NewKV(cfg, log.NewNopLogger()) require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv)) @@ -645,26 +641,26 @@ func TestJoinMembersWithRetryBackoff(t *testing.T) { for i, port := range ports { id := fmt.Sprintf("Member-%d", i) - cfg := KVConfig{ - NodeName: id, + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.NodeName = id - GossipInterval: 100 * time.Millisecond, - GossipNodes: 3, - PushPullInterval: 5 * time.Second, + cfg.GossipInterval = 100 * time.Millisecond + cfg.GossipNodes = 3 + cfg.PushPullInterval = 5 * time.Second - MinJoinBackoff: 100 * time.Millisecond, - MaxJoinBackoff: 1 * time.Minute, - MaxJoinRetries: 10, - AbortIfJoinFails: true, + cfg.MinJoinBackoff = 100 * time.Millisecond + cfg.MaxJoinBackoff = 1 * time.Minute + cfg.MaxJoinRetries = 10 + cfg.AbortIfJoinFails = true - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: port, - }, - - Codecs: []codec.Codec{c}, + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: port, } + cfg.Codecs = []codec.Codec{c} + if i == 0 { // Add members to first KV config to join immediately on initialization. // This will enforce backoff as each next members listener is not open yet. 
@@ -732,21 +728,21 @@ func TestMemberlistFailsToJoin(t *testing.T) { ports, err := getFreePorts(1) require.NoError(t, err) - cfg := KVConfig{ - MinJoinBackoff: 100 * time.Millisecond, - MaxJoinBackoff: 100 * time.Millisecond, - MaxJoinRetries: 2, - AbortIfJoinFails: true, + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.MinJoinBackoff = 100 * time.Millisecond + cfg.MaxJoinBackoff = 100 * time.Millisecond + cfg.MaxJoinRetries = 2 + cfg.AbortIfJoinFails = true - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: 0, - }, + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: 0, + } - JoinMembers: []string{fmt.Sprintf("127.0.0.1:%d", ports[0])}, + cfg.JoinMembers = []string{fmt.Sprintf("127.0.0.1:%d", ports[0])} - Codecs: []codec.Codec{c}, - } + cfg.Codecs = []codec.Codec{c} mkv := NewKV(cfg, log.NewNopLogger()) require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv)) @@ -899,16 +895,16 @@ func (d distributedCounterCodec) Encode(val interface{}) ([]byte, error) { var _ codec.Codec = &distributedCounterCodec{} func TestMultipleCodecs(t *testing.T) { - cfg := KVConfig{ - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: 0, // randomize - }, + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: 0, // randomize + } - Codecs: []codec.Codec{ - dataCodec{}, - distributedCounterCodec{}, - }, + cfg.Codecs = []codec.Codec{ + dataCodec{}, + distributedCounterCodec{}, } mkv1 := NewKV(cfg, log.NewNopLogger()) @@ -990,17 +986,17 @@ func TestRejoin(t *testing.T) { ports, err := getFreePorts(2) require.NoError(t, err) - cfg1 := KVConfig{ - TCPTransport: TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: ports[0], - }, - - RandomizeNodeName: true, - Codecs: []codec.Codec{dataCodec{}}, - AbortIfJoinFails: false, + var cfg1 KVConfig + flagext.DefaultValues(&cfg1) + cfg1.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: ports[0], } + cfg1.RandomizeNodeName = true + cfg1.Codecs = []codec.Codec{dataCodec{}} + cfg1.AbortIfJoinFails = false + cfg2 := cfg1 cfg2.TCPTransport.BindPort = ports[1] cfg2.JoinMembers = []string{fmt.Sprintf("localhost:%d", ports[0])} From c3d412cddc567982a1bd73f6609ed458a3aa6070 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 9 Jul 2021 08:51:38 +0200 Subject: [PATCH 006/137] Allow setting ring heartbeat timeout to zero to disable timeout check. (#4342) * Allow setting ring heartbeat timeout to zero to disable timeout check. This change allows the various ring heartbeat timeouts to be configured with zero, as a means of disabling the timeout. This is expected to be used with a separate enhancement to allow disabling heartbeats. When the heartbeat timeout is disabled, instances will always appear as healthy in the ring. Signed-off-by: Steve Simpson * Review comments. 
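To illustrate the behaviour this change describes, here is a minimal sketch (the `instance` type and `heartbeatHealthy` helper are illustrative names only; the real check is the `IsHeartbeatHealthy` method added to `pkg/ring/model.go` in the diff below):

```go
// Simplified sketch, not the Cortex implementation: a zero heartbeat timeout
// means "never time out", so the instance always counts as healthy.
package main

import (
	"fmt"
	"time"
)

// instance is a hypothetical stand-in for the ring's instance descriptor.
type instance struct {
	lastHeartbeat time.Time
}

// heartbeatHealthy reports whether the last heartbeat is recent enough.
func heartbeatHealthy(i instance, timeout time.Duration, now time.Time) bool {
	if timeout == 0 {
		return true // timeout disabled: heartbeat age is ignored
	}
	return now.Sub(i.lastHeartbeat) <= timeout
}

func main() {
	stale := instance{lastHeartbeat: time.Now().Add(-5 * time.Minute)}
	fmt.Println(heartbeatHealthy(stale, time.Minute, time.Now())) // false: older than the 1m timeout
	fmt.Println(heartbeatHealthy(stale, 0, time.Now()))           // true: zero disables the check
}
```

Treating a zero timeout as "never expire" keeps the zero value meaningful without introducing a separate enable/disable flag.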
Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- CHANGELOG.md | 7 +++++++ docs/blocks-storage/compactor.md | 2 +- docs/blocks-storage/store-gateway.md | 4 ++-- docs/configuration/config-file-reference.md | 13 ++++++------ docs/configuration/v1-guarantees.md | 7 +++++++ pkg/alertmanager/alertmanager_ring.go | 2 +- pkg/compactor/compactor_ring.go | 2 +- pkg/distributor/distributor_ring.go | 2 +- pkg/ring/model.go | 13 ++++++++++-- pkg/ring/model_test.go | 8 +++++++ pkg/ring/ring.go | 2 +- pkg/ring/ring_test.go | 23 +++++++++++++++++++-- pkg/ruler/ruler_ring.go | 2 +- pkg/storegateway/gateway_ring.go | 2 +- 14 files changed, 70 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 732b329900f..95bd7af7b89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262 * [ENHANCEMENT] Reduce memory used by streaming queries, particularly in ruler. #4341 +* [ENHANCEMENT] Ring: allow experimental configuration of disabling of heartbeat timeouts by setting the relevant configuration value to zero. Applies to the following: #4342 + * `-distributor.ring.heartbeat-timeout` + * `-ring.heartbeat-timeout` + * `-ruler.ring.heartbeat-timeout` + * `-alertmanager.sharding-ring.heartbeat-timeout` + * `-compactor.ring.heartbeat-timeout` + * `-store-gateway.sharding-ring.heartbeat-timeout` * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index 6a599ace11b..f5a88c52daa 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -219,7 +219,7 @@ compactor: [heartbeat_period: | default = 5s] # The heartbeat timeout after which compactors are considered unhealthy - # within the ring. + # within the ring. 0 = never (timeout disabled). # CLI flag: -compactor.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index d24813be5f6..3cd768fea25 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -237,8 +237,8 @@ store_gateway: [heartbeat_period: | default = 15s] # The heartbeat timeout after which store gateways are considered unhealthy - # within the ring. This option needs be set both on the store-gateway and - # querier when running in microservices mode. + # within the ring. 0 = never (timeout disabled). This option needs be set + # both on the store-gateway and querier when running in microservices mode. 
# CLI flag: -store-gateway.sharding-ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index cc6e8dd7449..cb148bed356 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -568,7 +568,7 @@ ring: [heartbeat_period: | default = 5s] # The heartbeat timeout after which distributors are considered unhealthy - # within the ring. + # within the ring. 0 = never (timeout disabled). # CLI flag: -distributor.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -662,6 +662,7 @@ lifecycler: [mirror_timeout: | default = 2s] # The heartbeat timeout after which ingesters are skipped for reads/writes. + # 0 = never (timeout disabled). # CLI flag: -ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1585,7 +1586,7 @@ ring: [heartbeat_period: | default = 5s] # The heartbeat timeout after which rulers are considered unhealthy within the - # ring. + # ring. 0 = never (timeout disabled). # CLI flag: -ruler.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -1906,7 +1907,7 @@ sharding_ring: [heartbeat_period: | default = 15s] # The heartbeat timeout after which alertmanagers are considered unhealthy - # within the ring. + # within the ring. 0 = never (timeout disabled). # CLI flag: -alertmanager.sharding-ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -5182,7 +5183,7 @@ sharding_ring: [heartbeat_period: | default = 5s] # The heartbeat timeout after which compactors are considered unhealthy within - # the ring. + # the ring. 0 = never (timeout disabled). # CLI flag: -compactor.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] @@ -5260,8 +5261,8 @@ sharding_ring: [heartbeat_period: | default = 15s] # The heartbeat timeout after which store gateways are considered unhealthy - # within the ring. This option needs be set both on the store-gateway and - # querier when running in microservices mode. + # within the ring. 0 = never (timeout disabled). This option needs be set both + # on the store-gateway and querier when running in microservices mode. 
# CLI flag: -store-gateway.sharding-ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 8c87104a224..36e39221ffe 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -81,3 +81,10 @@ Currently experimental features are: - user config size (`-alertmanager.max-config-size-bytes`) - templates count in user config (`-alertmanager.max-templates-count`) - max template size (`-alertmanager.max-template-size-bytes`) +- Disabling ring heartbeat timeouts + - `-distributor.ring.heartbeat-timeout=0` + - `-ring.heartbeat-timeout=0` + - `-ruler.ring.heartbeat-timeout=0` + - `-alertmanager.sharding-ring.heartbeat-timeout=0` + - `-compactor.ring.heartbeat-timeout=0` + - `-store-gateway.sharding-ring.heartbeat-timeout=0` \ No newline at end of file diff --git a/pkg/alertmanager/alertmanager_ring.go b/pkg/alertmanager/alertmanager_ring.go index 04d08d2d656..f0b707a0901 100644 --- a/pkg/alertmanager/alertmanager_ring.go +++ b/pkg/alertmanager/alertmanager_ring.go @@ -77,7 +77,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix(rfprefix, "alertmanagers/", f) f.DurationVar(&cfg.HeartbeatPeriod, rfprefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, rfprefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, rfprefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring. 0 = never (timeout disabled).") f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") f.BoolVar(&cfg.ZoneAwarenessEnabled, rfprefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate alerts across different availability zones.") diff --git a/pkg/compactor/compactor_ring.go b/pkg/compactor/compactor_ring.go index 5b55c8a8719..d384220a327 100644 --- a/pkg/compactor/compactor_ring.go +++ b/pkg/compactor/compactor_ring.go @@ -51,7 +51,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") // Wait stability flags. f.DurationVar(&cfg.WaitStabilityMinDuration, "compactor.ring.wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 
0 to disable.") diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index ee96a7d37cd..ce70464e342 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -43,7 +43,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} diff --git a/pkg/ring/model.go b/pkg/ring/model.go index c10281080ef..dac6103b52a 100644 --- a/pkg/ring/model.go +++ b/pkg/ring/model.go @@ -101,7 +101,7 @@ func (d *Desc) FindIngestersByState(state InstanceState) []InstanceDesc { func (d *Desc) Ready(now time.Time, heartbeatTimeout time.Duration) error { numTokens := 0 for id, ingester := range d.Ingesters { - if now.Sub(time.Unix(ingester.Timestamp, 0)) > heartbeatTimeout { + if !ingester.IsHeartbeatHealthy(heartbeatTimeout, now) { return fmt.Errorf("instance %s past heartbeat timeout", id) } else if ingester.State != ACTIVE { return fmt.Errorf("instance %s in state %v", id, ingester.State) @@ -136,7 +136,16 @@ func (i *InstanceDesc) GetRegisteredAt() time.Time { func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { healthy := op.IsInstanceInStateHealthy(i.State) - return healthy && now.Unix()-i.Timestamp <= heartbeatTimeout.Milliseconds()/1000 + return healthy && i.IsHeartbeatHealthy(heartbeatTimeout, now) +} + +// IsHeartbeatHealthy returns whether the heartbeat timestamp for the ingester is within the +// specified timeout period. A timeout of zero disables the timeout; the heartbeat is ignored. +func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, now time.Time) bool { + if heartbeatTimeout == 0 { + return true + } + return now.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout } // Merge merges other ring into this one. 
Returns sub-ring that represents the change, diff --git a/pkg/ring/model_test.go b/pkg/ring/model_test.go index 9570abf17d0..1d73e6f98b9 100644 --- a/pkg/ring/model_test.go +++ b/pkg/ring/model_test.go @@ -136,10 +136,18 @@ func TestDesc_Ready(t *testing.T) { t.Fatal("expected ready, got", err) } + if err := r.Ready(now, 0); err != nil { + t.Fatal("expected ready, got", err) + } + if err := r.Ready(now.Add(5*time.Minute), 10*time.Second); err == nil { t.Fatal("expected !ready (no heartbeat from active ingester), but got no error") } + if err := r.Ready(now.Add(5*time.Minute), 0); err != nil { + t.Fatal("expected ready (no heartbeat but timeout disabled), got", err) + } + r = &Desc{ Ingesters: map[string]InstanceDesc{ "ing1": { diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 8d5bd0c925a..72165ed369d 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -147,7 +147,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix(prefix, "collectors/", f) - f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") + f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes. 0 = never (timeout disabled).") f.IntVar(&cfg.ReplicationFactor, prefix+"distributor.replication-factor", 3, "The number of ingesters to write to and read from.") f.BoolVar(&cfg.ZoneAwarenessEnabled, prefix+"distributor.zone-awareness-enabled", false, "True to enable the zone-awareness and replicate ingested samples across different availability zones.") } diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go index f1d038d291e..c4898e939cf 100644 --- a/pkg/ring/ring_test.go +++ b/pkg/ring/ring_test.go @@ -390,11 +390,11 @@ func TestRing_GetAllHealthy(t *testing.T) { } func TestRing_GetReplicationSetForOperation(t *testing.T) { - const heartbeatTimeout = time.Minute now := time.Now() tests := map[string]struct { ringInstances map[string]InstanceDesc + ringHeartbeatTimeout time.Duration ringReplicationFactor int expectedErrForRead error expectedSetForRead []string @@ -405,6 +405,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { }{ "should return error on empty ring": { ringInstances: nil, + ringHeartbeatTimeout: time.Minute, ringReplicationFactor: 1, expectedErrForRead: ErrEmptyRing, expectedErrForWrite: ErrEmptyRing, @@ -418,6 +419,21 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-30 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)}, "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-40 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)}, }, + ringHeartbeatTimeout: time.Minute, + ringReplicationFactor: 1, + expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + expectedSetForWrite: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + expectedSetForReporting: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, + }, + "should succeed on instances with old timestamps but heartbeat timeout disabled": { + ringInstances: map[string]InstanceDesc{ + "instance-1": {Addr: "127.0.0.1", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, + "instance-2": {Addr: 
"127.0.0.2", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, + "instance-3": {Addr: "127.0.0.3", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, + "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, + "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, + }, + ringHeartbeatTimeout: 0, ringReplicationFactor: 1, expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, expectedSetForWrite: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"}, @@ -431,6 +447,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-30 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)}, "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, }, + ringHeartbeatTimeout: time.Minute, ringReplicationFactor: 1, expectedErrForRead: ErrTooManyUnhealthyInstances, expectedErrForWrite: ErrTooManyUnhealthyInstances, @@ -444,6 +461,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-30 * time.Second).Unix(), Tokens: GenerateTokens(128, nil)}, "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, }, + ringHeartbeatTimeout: time.Minute, ringReplicationFactor: 3, expectedSetForRead: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"}, expectedSetForWrite: []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"}, @@ -457,6 +475,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { "instance-4": {Addr: "127.0.0.4", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, "instance-5": {Addr: "127.0.0.5", State: ACTIVE, Timestamp: now.Add(-2 * time.Minute).Unix(), Tokens: GenerateTokens(128, nil)}, }, + ringHeartbeatTimeout: time.Minute, ringReplicationFactor: 3, expectedErrForRead: ErrTooManyUnhealthyInstances, expectedErrForWrite: ErrTooManyUnhealthyInstances, @@ -474,7 +493,7 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { ring := Ring{ cfg: Config{ - HeartbeatTimeout: heartbeatTimeout, + HeartbeatTimeout: testData.ringHeartbeatTimeout, ReplicationFactor: testData.ringReplicationFactor, }, ringDesc: ringDesc, diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index 20d2e900aba..a3902ed1c4d 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -57,7 +57,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("ruler.ring.", "rulers/", f) f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which rulers are considered unhealthy within the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which rulers are considered unhealthy within the ring. 
0 = never (timeout disabled).") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index 4ca5ed08a74..f32bf37ccd1 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -95,7 +95,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix(ringFlagsPrefix, "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, ringFlagsPrefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") - f.DurationVar(&cfg.HeartbeatTimeout, ringFlagsPrefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring."+sharedOptionWithQuerier) + f.DurationVar(&cfg.HeartbeatTimeout, ringFlagsPrefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring. 0 = never (timeout disabled)."+sharedOptionWithQuerier) f.IntVar(&cfg.ReplicationFactor, ringFlagsPrefix+"replication-factor", 3, "The replication factor to use when sharding blocks."+sharedOptionWithQuerier) f.StringVar(&cfg.TokensFilePath, ringFlagsPrefix+"tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") f.BoolVar(&cfg.ZoneAwarenessEnabled, ringFlagsPrefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") From 51b2a854465dd3c10abc63ae612eb80c56e1d0b3 Mon Sep 17 00:00:00 2001 From: Tyler Reid Date: Fri, 9 Jul 2021 02:03:54 -0500 Subject: [PATCH 007/137] Add a new config and metric for reporting ruler query execution wall time. (#4317) * Add a new config and metric for reporting ruler query execution wall time. Signed-off-by: Tyler Reid * Spacing and PR number fixup Signed-off-by: Tyler Reid * Wrap the defer in a function to make it defer after the return rather than after the if block. Add a unit test to validate we're tracking time correctly. Signed-off-by: Tyler Reid * Use seconds for our duration rather than nanoseconds Signed-off-by: Tyler Reid * Review comment fixes Signed-off-by: Tyler Reid * Update config flag in the config docs Signed-off-by: Tyler Reid * Pass counter rather than counter vector for metrics query function Signed-off-by: Tyler Reid * Fix comment in MetricsQueryFunction Signed-off-by: Tyler Reid * Move query metric and log to separate function. Add log message for ruler query time. 
Signed-off-by: Tyler Reid

* Update config file and change log to show this as a per-user metric

Signed-off-by: Tyler Reid

* code review fixes

Signed-off-by: Tyler Reid

* update log message for ruler query metrics

Signed-off-by: Tyler Reid

* Remove append and just use the array for key values in the log message

Signed-off-by: Tyler Reid

* Add query-frontend component to front end log message

Signed-off-by: Tyler Reid
Signed-off-by: Alvin Lin
---
 CHANGELOG.md                                |  1 +
 docs/configuration/config-file-reference.md |  5 +++
 pkg/frontend/transport/handler.go           |  1 +
 pkg/ruler/compat.go                         | 45 +++++++++++++++++++--
 pkg/ruler/compat_test.go                    | 15 ++++++-
 pkg/ruler/ruler.go                          |  4 ++
 pkg/ruler/ruler_test.go                     |  1 +
 7 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95bd7af7b89..7ce523859cb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,7 @@
 # Changelog
 
 ## master / unreleased
+* [FEATURE] Ruler: Add new `-ruler.query-stats-enabled` which when enabled will report the `cortex_ruler_query_seconds_total` as a per-user metric that tracks the sum of the wall time of executing queries in the ruler in seconds. #4317
 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260
 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index cb148bed356..de7ea120f58 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -1617,6 +1617,11 @@ ring:
 # processing will ignore them instead. Subject to sharding.
 # CLI flag: -ruler.disabled-tenants
 [disabled_tenants: | default = ""]
+
+# Report the wall time for ruler queries to complete as a per user metric and as
+# an info level log message.
+# CLI flag: -ruler.query-stats-enabled
+[query_stats_enabled: | default = false]
 ```
 
 ### `ruler_storage_config`
diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go
index 8fcf8e9630e..435a0227481 100644
--- a/pkg/frontend/transport/handler.go
+++ b/pkg/frontend/transport/handler.go
@@ -173,6 +173,7 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer
 	// Log stats.
 	logMessage := append([]interface{}{
 		"msg", "query stats",
+		"component", "query-frontend",
 		"method", r.Method,
 		"path", r.URL.Path,
 		"response_time", queryResponseTime,
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 3c02695a9c6..c95cfcd9446 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/prometheus/notifier"
@@ -20,6 +21,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/cortexpb"
 	"github.com/cortexproject/cortex/pkg/querier"
+	util_log "github.com/cortexproject/cortex/pkg/util/log"
 )
 
 // Pusher is an ingester server that accepts pushes.
@@ -146,9 +148,7 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable, overrides Rules func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Counter) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { queries.Inc() - result, err := qf(ctx, qs, t) - // We rely on TranslateToPromqlApiError to do its job here... it returns nil, if err is nil. // It returns promql.ErrStorage, if error should be reported back as 500. // Other errors it returns are either for canceled or timed-out queriers (we're not reporting those as failures), @@ -163,6 +163,33 @@ func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Coun } } +func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, queryTime prometheus.Counter, logger log.Logger) rules.QueryFunc { + if queryTime == nil { + return qf + } + + return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { + // If we've been passed a counter we want to record the wall time spent executing this request. + timer := prometheus.NewTimer(nil) + defer func() { + querySeconds := timer.ObserveDuration().Seconds() + queryTime.Add(querySeconds) + + // Log ruler query stats. + logMessage := []interface{}{ + "msg", "query stats", + "component", "ruler", + "cortex_ruler_query_seconds_total", querySeconds, + "query", qs, + } + level.Info(util_log.WithContext(ctx, logger)).Log(logMessage...) + }() + + result, err := qf(ctx, qs, t) + return result, err + } +} + // This interface mimicks rules.Manager API. Interface is used to simplify tests. type RulesManager interface { // Starts rules manager. Blocks until Stop is called. @@ -199,12 +226,24 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi Name: "cortex_ruler_queries_failed_total", Help: "Number of failed queries by ruler.", }) + var rulerQuerySeconds *prometheus.CounterVec + if cfg.EnableQueryStats { + rulerQuerySeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ruler_query_seconds_total", + Help: "Total amount of wall clock time spent processing queries by the ruler.", + }, []string{"user"}) + } return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager { + var queryTime prometheus.Counter = nil + if rulerQuerySeconds != nil { + queryTime = rulerQuerySeconds.WithLabelValues(userID) + } + return rules.NewManager(&rules.ManagerOptions{ Appendable: NewPusherAppendable(p, userID, overrides, totalWrites, failedWrites), Queryable: q, - QueryFunc: MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries), + QueryFunc: RecordAndReportRuleQueryMetrics(MetricsQueryFunc(EngineQueryFunc(engine, q, overrides, userID), totalQueries, failedQueries), queryTime, logger), Context: user.InjectOrgID(ctx, userID), ExternalURL: cfg.ExternalURL.URL, NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()), diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index 1c5a9fe17b2..dfcb251803a 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" @@ -230,7 +231,6 @@ func TestMetricsQueryFuncErrors(t *testing.T) { mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { return 
promql.Vector{}, tc.returnedError } - qf := MetricsQueryFunc(mockFunc, queries, failures) _, err := qf(context.Background(), "test", time.Now()) @@ -241,3 +241,16 @@ func TestMetricsQueryFuncErrors(t *testing.T) { }) } } + +func TestRecordAndReportRuleQueryMetrics(t *testing.T) { + queryTime := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}) + + mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + time.Sleep(1 * time.Second) + return promql.Vector{}, nil + } + qf := RecordAndReportRuleQueryMetrics(mockFunc, queryTime.WithLabelValues("userID"), log.NewNopLogger()) + _, _ = qf(context.Background(), "test", time.Now()) + + require.GreaterOrEqual(t, testutil.ToFloat64(queryTime.WithLabelValues("userID")), float64(1)) +} diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 25061a8d353..57b51ed103c 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -115,6 +115,8 @@ type Config struct { DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` RingCheckPeriod time.Duration `yaml:"-"` + + EnableQueryStats bool `yaml:"query_stats_enabled"` } // Validate config and returns error on failure @@ -173,6 +175,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.EnabledTenants, "ruler.enabled-tenants", "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. Subject to sharding.") f.Var(&cfg.DisabledTenants, "ruler.disabled-tenants", "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.") + f.BoolVar(&cfg.EnableQueryStats, "ruler.query-stats-enabled", false, "Report the wall time for ruler queries to complete as a per user metric and as an info level log message.") + cfg.RingCheckPeriod = 5 * time.Second } diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index a6e89918b6f..503474afd8c 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -60,6 +60,7 @@ func defaultRulerConfig(store rulestore.RuleStore) (Config, func()) { cfg.Ring.ListenPort = 0 cfg.Ring.InstanceAddr = "localhost" cfg.Ring.InstanceID = "localhost" + cfg.EnableQueryStats = false // Create a cleanup function that will be called at the end of the test cleanup := func() { From 9ceb7d606793489a2b1d429d1fa2ad097f51e710 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 9 Jul 2021 12:40:27 +0100 Subject: [PATCH 008/137] Add a security doc (#4337) I thought it would be good to put a security page into the docs, so that it shows up in a search. Content is just pointing at other resources. Signed-off-by: Bryan Boreham Signed-off-by: Alvin Lin --- docs/_index.md | 3 +++ docs/guides/security.md | 12 ++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 docs/guides/security.md diff --git a/docs/_index.md b/docs/_index.md index b97ef3a463b..a11bead3d90 100644 --- a/docs/_index.md +++ b/docs/_index.md @@ -36,6 +36,9 @@ should read: 1. [Getting started with Cortex](getting-started/_index.md) 1. [Information regarding configuring Cortex](configuration/_index.md) +There are also individual [guides](guides/_index.md) to many tasks. +Please review the important [security advice](guides/security.md) before deploying. + For a guide to contributing to Cortex, see the [contributor guidelines](contributing/). 
## Further reading
diff --git a/docs/guides/security.md b/docs/guides/security.md
new file mode 100644
index 00000000000..96592715d3f
--- /dev/null
+++ b/docs/guides/security.md
@@ -0,0 +1,12 @@
+---
+title: "Security"
+linkTitle: "Security"
+weight: 10
+slug: security
+---
+
+Cortex must be deployed with due care over system configuration, using principles such as "least privilege" to limit any exposure due to flaws in the source code.
+
+You must configure authorisation and authentication externally to Cortex; see [this guide](./authentication-and-authorisation.md)
+
+Information about security disclosures and mailing lists is [in the main repo](https://github.com/cortexproject/cortex/blob/master/SECURITY.md)

From 6418248dc1d47fd008f78b8910eb8c24f5fd4703 Mon Sep 17 00:00:00 2001
From: Steve Simpson
Date: Fri, 9 Jul 2021 13:43:02 +0200
Subject: [PATCH 009/137] Optimise memberlist kv store access by storing data unencoded. (#4345)

* Optimise memberlist kv store access by storing data unencoded.

The following profile data was taken from running 50 idle ingesters with memberlist, with almost everything at default values (5s heartbeats):

```
52.16% mergeBytesValueForKey
+- 52.16% mergeValueForKey
+- 47.84% computeNewValue
+- 27.24% codec Proto Decode
+- 26.25% mergeWithTime
```

It is apparent from this that a lot of time is spent on the memberlist receive path, as might be expected, specifically, the merging of the update into the current state.

The cost however is not in decoding the incoming states (occurs in `mergeBytesValueForKey` before `mergeValueForKey`), but in fact decoding the _current state_ of the value in the store (as it is stored encoded). The ring state was measured at 123K (50 ingesters), so it makes sense that decoding could be costly.

This can be avoided by storing the value in its decoded `Mergeable` form. When doing this, care has to be taken to deep copy the value when accessed, as it is modified in place before being updated in the store, and accessed outside the store mutex.

Note a side effect of this change is that it is no longer straightforward to expose the `memberlist_kv_store_value_bytes` metric, as this reported the size of the encoded data, therefore it has been removed.

Signed-off-by: Steve Simpson

* Typo.

Signed-off-by: Steve Simpson

* Review comments.

Signed-off-by: Steve Simpson
Signed-off-by: Alvin Lin
---
 CHANGELOG.md                                |  2 +
 pkg/ring/kv/memberlist/kv_init_service.go   | 50 ++++++-----
 pkg/ring/kv/memberlist/memberlist_client.go | 86 ++++++++-----------
 .../kv/memberlist/memberlist_client_test.go | 28 ++++++
 pkg/ring/kv/memberlist/mergeable.go         |  3 +
 pkg/ring/kv/memberlist/metrics.go           | 10 ---
 pkg/ring/model.go                           |  5 ++
 7 files changed, 104 insertions(+), 80 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7ce523859cb..114a40c7da9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@
 * [FEATURE] Ruler: Add new `-ruler.query-stats-enabled` which when enabled will report the `cortex_ruler_query_seconds_total` as a per-user metric that tracks the sum of the wall time of executing queries in the ruler in seconds. #4317
 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`.
#4260 +* [CHANGE] Memberlist: the `memberlist_kv_store_value_bytes` has been removed due to values no longer being stored in-memory as encoded bytes. #4345 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262 * [ENHANCEMENT] Reduce memory used by streaming queries, particularly in ruler. #4341 * [ENHANCEMENT] Ring: allow experimental configuration of disabling of heartbeat timeouts by setting the relevant configuration value to zero. Applies to the following: #4342 @@ -13,6 +14,7 @@ * `-alertmanager.sharding-ring.heartbeat-timeout` * `-compactor.ring.heartbeat-timeout` * `-store-gateway.sharding-ring.heartbeat-timeout` +* [ENHANCEMENT] Memberlist: optimized receive path for processing ring state updates, to help reduce CPU utilization in large clusters. #4345 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/pkg/ring/kv/memberlist/kv_init_service.go b/pkg/ring/kv/memberlist/kv_init_service.go index da4d965b7c9..517c6742819 100644 --- a/pkg/ring/kv/memberlist/kv_init_service.go +++ b/pkg/ring/kv/memberlist/kv_init_service.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/memberlist" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -106,12 +105,12 @@ func (kvs *KVInitService) ServeHTTP(w http.ResponseWriter, req *http.Request) { if err := req.ParseForm(); err == nil { if req.Form[downloadKeyParam] != nil { - downloadKey(w, kv.storeCopy(), req.Form[downloadKeyParam][0]) // Use first value, ignore the rest. + downloadKey(w, kv, kv.storeCopy(), req.Form[downloadKeyParam][0]) // Use first value, ignore the rest. 
return } if req.Form[viewKeyParam] != nil { - viewKey(w, kv, kv.storeCopy(), req.Form[viewKeyParam][0], getFormat(req)) + viewKey(w, kv.storeCopy(), req.Form[viewKeyParam][0], getFormat(req)) return } @@ -179,30 +178,25 @@ func viewMessage(w http.ResponseWriter, kv *KV, msg message, format string) { return } - formatValue(w, c, msg.Pair.Value, format) + val, err := c.Decode(msg.Pair.Value) + if err != nil { + http.Error(w, fmt.Sprintf("failed to decode: %v", err), http.StatusInternalServerError) + return + } + + formatValue(w, val, format) } -func viewKey(w http.ResponseWriter, kv *KV, store map[string]valueDesc, key string, format string) { +func viewKey(w http.ResponseWriter, store map[string]valueDesc, key string, format string) { if store[key].value == nil { http.Error(w, "value not found", http.StatusNotFound) return } - c := kv.GetCodec(store[key].codecID) - if c == nil { - http.Error(w, "codec not found", http.StatusNotFound) - return - } - - formatValue(w, c, store[key].value, format) + formatValue(w, store[key].value, format) } -func formatValue(w http.ResponseWriter, codec codec.Codec, value []byte, format string) { - val, err := codec.Decode(value) - if err != nil { - http.Error(w, fmt.Sprintf("failed to decode: %v", err), http.StatusInternalServerError) - return - } +func formatValue(w http.ResponseWriter, val interface{}, format string) { w.WriteHeader(200) w.Header().Add("content-type", "text/plain") @@ -214,7 +208,7 @@ func formatValue(w http.ResponseWriter, codec codec.Codec, value []byte, format enc.SetIndent("", " ") } - err = enc.Encode(val) + err := enc.Encode(val) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -224,7 +218,7 @@ func formatValue(w http.ResponseWriter, codec codec.Codec, value []byte, format } } -func downloadKey(w http.ResponseWriter, store map[string]valueDesc, key string) { +func downloadKey(w http.ResponseWriter, kv *KV, store map[string]valueDesc, key string) { if store[key].value == nil { http.Error(w, "value not found", http.StatusNotFound) return @@ -232,14 +226,26 @@ func downloadKey(w http.ResponseWriter, store map[string]valueDesc, key string) val := store[key] + c := kv.GetCodec(store[key].codecID) + if c == nil { + http.Error(w, "codec not found", http.StatusNotFound) + return + } + + encoded, err := c.Encode(val.value) + if err != nil { + http.Error(w, fmt.Sprintf("failed to encode: %v", err), http.StatusInternalServerError) + return + } + w.Header().Add("content-type", "application/octet-stream") // Set content-length so that client knows whether it has received full response or not. - w.Header().Add("content-length", strconv.Itoa(len(val.value))) + w.Header().Add("content-length", strconv.Itoa(len(encoded))) w.Header().Add("content-disposition", fmt.Sprintf("attachment; filename=%d-%s", val.version, key)) w.WriteHeader(200) // Ignore errors, we cannot do anything about them. 
- _, _ = w.Write(val.value) + _, _ = w.Write(encoded) } type pageData struct { diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index c0f962993b5..7e45868bd96 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -265,7 +265,6 @@ type KV struct { watchPrefixDroppedNotifications *prometheus.CounterVec storeValuesDesc *prometheus.Desc - storeSizesDesc *prometheus.Desc storeTombstones *prometheus.GaugeVec storeRemovedTombstones *prometheus.CounterVec @@ -292,9 +291,11 @@ type message struct { } type valueDesc struct { - // We store bytes here. Reason is that clients calling CAS function will modify the object in place, - // but unless CAS succeeds, we don't want those modifications to be visible. - value []byte + // We store the decoded value here to prevent decoding the entire state for every + // update we receive. Whilst the updates are small and fast to decode, + // the total state can be quite large. + // The CAS function is passed a deep copy because it modifies in-place. + value Mergeable // version (local only) is used to keep track of what we're gossiping about, and invalidate old messages version uint @@ -303,8 +304,16 @@ type valueDesc struct { codecID string } +func (v valueDesc) Clone() (result valueDesc) { + result = v + if v.value != nil { + result.value = v.value.Clone() + } + return +} + func (v valueDesc) String() string { - return fmt.Sprintf("size: %d, version: %d, codec: %s", len(v.value), v.version, v.codecID) + return fmt.Sprintf("version: %d, codec: %s", v.version, v.codecID) } var ( @@ -612,24 +621,16 @@ func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) { // Returns current value with removed tombstones. func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, err error) { m.storeMu.Lock() - v := m.store[key] + v := m.store[key].Clone() m.storeMu.Unlock() - out = nil if v.value != nil { - out, err = codec.Decode(v.value) - if err != nil { - return nil, 0, err - } - - if mr, ok := out.(Mergeable); ok { - // remove ALL tombstones before returning to client. - // No need for clients to see them. - _, _ = mr.RemoveTombstones(time.Time{}) - } + // remove ALL tombstones before returning to client. + // No need for clients to see them. + _, _ = v.value.RemoveTombstones(time.Time{}) } - return out, v.version, nil + return v.value, v.version, nil } // WatchKey watches for value changes for given key. 
When value changes, 'f' function is called with the @@ -1043,9 +1044,21 @@ func (m *KV) LocalState(join bool) []byte { continue } + codec := m.GetCodec(val.codecID) + if codec == nil { + level.Error(m.logger).Log("msg", "failed to encode remote state: unknown codec for key", "codec", val.codecID, "key", key) + continue + } + + encoded, err := codec.Encode(val.value) + if err != nil { + level.Error(m.logger).Log("msg", "failed to encode remote state", "err", err) + continue + } + kvPair.Reset() kvPair.Key = key - kvPair.Value = val.value + kvPair.Value = encoded kvPair.Codec = val.codecID ser, err := kvPair.Marshal() @@ -1055,7 +1068,7 @@ func (m *KV) LocalState(join bool) []byte { } if uint(len(ser)) > math.MaxUint32 { - level.Error(m.logger).Log("msg", "value too long", "key", key, "value_length", len(val.value)) + level.Error(m.logger).Log("msg", "value too long", "key", key, "value_length", len(encoded)) continue } @@ -1177,12 +1190,12 @@ func (m *KV) mergeValueForKey(key string, incomingValue Mergeable, casVersion ui m.storeMu.Lock() defer m.storeMu.Unlock() - curr := m.store[key] + curr := m.store[key].Clone() // if casVersion is 0, then there was no previous value, so we will just do normal merge, without localCAS flag set. if casVersion > 0 && curr.version != casVersion { return nil, 0, errVersionMismatch } - result, change, err := computeNewValue(incomingValue, curr.value, codec, casVersion > 0) + result, change, err := computeNewValue(incomingValue, curr.value, casVersion > 0) if err != nil { return nil, 0, err } @@ -1199,14 +1212,9 @@ func (m *KV) mergeValueForKey(key string, incomingValue Mergeable, casVersion ui m.storeRemovedTombstones.WithLabelValues(key).Add(float64(removed)) } - encoded, err := codec.Encode(result) - if err != nil { - return nil, 0, fmt.Errorf("failed to encode merged result: %v", err) - } - newVersion := curr.version + 1 m.store[key] = valueDesc{ - value: encoded, + value: result, version: newVersion, codecID: codec.CodecID(), } @@ -1215,25 +1223,7 @@ func (m *KV) mergeValueForKey(key string, incomingValue Mergeable, casVersion ui } // returns [result, change, error] -func computeNewValue(incoming Mergeable, stored []byte, c codec.Codec, cas bool) (Mergeable, Mergeable, error) { - if len(stored) == 0 { - return incoming, incoming, nil - } - - old, err := c.Decode(stored) - if err != nil { - return incoming, incoming, fmt.Errorf("failed to decode stored value: %v", err) - } - - if old == nil { - return incoming, incoming, nil - } - - oldVal, ok := old.(Mergeable) - if !ok { - return incoming, incoming, fmt.Errorf("stored value is not Mergeable, got %T", old) - } - +func computeNewValue(incoming Mergeable, oldVal Mergeable, cas bool) (Mergeable, Mergeable, error) { if oldVal == nil { return incoming, incoming, nil } @@ -1249,7 +1239,7 @@ func (m *KV) storeCopy() map[string]valueDesc { result := make(map[string]valueDesc, len(m.store)) for k, v := range m.store { - result[k] = v + result[k] = v.Clone() } return result } diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index de337571da3..2782c6f931a 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -89,6 +89,26 @@ func (d *data) RemoveTombstones(limit time.Time) (_, _ int) { return } +func (m member) clone() member { + out := member{ + Timestamp: m.Timestamp, + Tokens: make([]uint32, len(m.Tokens)), + State: m.State, + } + copy(out.Tokens, m.Tokens) + return out +} + +func (d 
*data) Clone() Mergeable { + out := &data{ + Members: make(map[string]member, len(d.Members)), + } + for k, v := range d.Members { + out.Members[k] = v.clone() + } + return out +} + func (d *data) getAllTokens() []uint32 { out := []uint32(nil) for _, m := range d.Members { @@ -872,6 +892,14 @@ func (dc distributedCounter) RemoveTombstones(limit time.Time) (_, _ int) { return } +func (dc distributedCounter) Clone() Mergeable { + out := make(distributedCounter, len(dc)) + for k, v := range dc { + out[k] = v + } + return out +} + type distributedCounterCodec struct{} func (d distributedCounterCodec) CodecID() string { diff --git a/pkg/ring/kv/memberlist/mergeable.go b/pkg/ring/kv/memberlist/mergeable.go index 0cb9964f1fc..a013e349880 100644 --- a/pkg/ring/kv/memberlist/mergeable.go +++ b/pkg/ring/kv/memberlist/mergeable.go @@ -40,4 +40,7 @@ type Mergeable interface { // time when client is accessing value from the store. It can be used to hide tombstones from the clients. // Returns the total number of tombstones present and the number of removed tombstones by this invocation. RemoveTombstones(limit time.Time) (total, removed int) + + // Clone should return a deep copy of the state. + Clone() Mergeable } diff --git a/pkg/ring/kv/memberlist/metrics.go b/pkg/ring/kv/memberlist/metrics.go index ff729723da1..e6e7d9bb4df 100644 --- a/pkg/ring/kv/memberlist/metrics.go +++ b/pkg/ring/kv/memberlist/metrics.go @@ -117,11 +117,6 @@ func (m *KV) createAndRegisterMetrics() { "Number of values in KV Store", nil, nil) - m.storeSizesDesc = prometheus.NewDesc( - prometheus.BuildFQName(m.cfg.MetricsNamespace, subsystem, "kv_store_value_bytes"), // gauge - "Sizes of values in KV Store in bytes", - []string{"key"}, nil) - m.storeTombstones = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: m.cfg.MetricsNamespace, Subsystem: subsystem, @@ -222,7 +217,6 @@ func (m *KV) createAndRegisterMetrics() { // Describe returns prometheus descriptions via supplied channel func (m *KV) Describe(ch chan<- *prometheus.Desc) { ch <- m.storeValuesDesc - ch <- m.storeSizesDesc } // Collect returns extra metrics via supplied channel @@ -231,8 +225,4 @@ func (m *KV) Collect(ch chan<- prometheus.Metric) { defer m.storeMu.Unlock() ch <- prometheus.MustNewConstMetric(m.storeValuesDesc, prometheus.GaugeValue, float64(len(m.store))) - - for k, v := range m.store { - ch <- prometheus.MustNewConstMetric(m.storeSizesDesc, prometheus.GaugeValue, float64(len(v.value)), k) - } } diff --git a/pkg/ring/model.go b/pkg/ring/model.go index dac6103b52a..3a257c39528 100644 --- a/pkg/ring/model.go +++ b/pkg/ring/model.go @@ -398,6 +398,11 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) { return } +// Clone returns a deep copy of the ring state. +func (d *Desc) Clone() memberlist.Mergeable { + return proto.Clone(d).(*Desc) +} + func (d *Desc) getTokensInfo() map[uint32]instanceInfo { out := map[uint32]instanceInfo{} From 869fe0640b5b3abfcf41ff9d4e232a93bffd3c56 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 9 Jul 2021 14:19:55 +0200 Subject: [PATCH 010/137] Allow disabling of ring heartbeats by setting relevant options to zero. (#4344) * Allow disabling of ring heartbeats by setting relevant options to zero. Signed-off-by: Steve Simpson * Review comments. 
Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- CHANGELOG.md | 7 ++++++ docs/blocks-storage/compactor.md | 2 +- docs/blocks-storage/store-gateway.md | 2 +- docs/configuration/config-file-reference.md | 12 ++++----- docs/configuration/v1-guarantees.md | 9 ++++++- pkg/alertmanager/alertmanager_ring.go | 2 +- pkg/compactor/compactor_ring.go | 2 +- pkg/distributor/distributor_ring.go | 2 +- pkg/ring/basic_lifecycler.go | 19 +++++++------- pkg/ring/lifecycler.go | 15 +++++------ pkg/ruler/ruler_ring.go | 2 +- pkg/storegateway/gateway_ring.go | 2 +- pkg/util/time.go | 11 ++++++++ pkg/util/time_test.go | 28 +++++++++++++++++++++ 14 files changed, 85 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 114a40c7da9..64066a61bf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,13 @@ * `-alertmanager.sharding-ring.heartbeat-timeout` * `-compactor.ring.heartbeat-timeout` * `-store-gateway.sharding-ring.heartbeat-timeout` +* [ENHANCEMENT] Ring: allow heartbeats to be explicitly disabled by setting the interval to zero. This is considered experimental. This applies to the following configuration options: #4344 + * `-distributor.ring.heartbeat-period` + * `-ingester.heartbeat-period` + * `-ruler.ring.heartbeat-period` + * `-alertmanager.sharding-ring.heartbeat-period` + * `-compactor.ring.heartbeat-period` + * `-store-gateway.sharding-ring.heartbeat-period` * [ENHANCEMENT] Memberlist: optimized receive path for processing ring state updates, to help reduce CPU utilization in large clusters. #4345 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index f5a88c52daa..c8620d3bf97 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -214,7 +214,7 @@ compactor: # CLI flag: -compactor.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -compactor.ring.heartbeat-period [heartbeat_period: | default = 5s] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 3cd768fea25..a00297a4455 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -232,7 +232,7 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -store-gateway.sharding-ring.heartbeat-period [heartbeat_period: | default = 15s] diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index de7ea120f58..22859a244e3 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -563,7 +563,7 @@ ring: # CLI flag: -distributor.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -distributor.ring.heartbeat-period [heartbeat_period: | default = 5s] @@ -679,7 +679,7 @@ lifecycler: # CLI flag: -ingester.num-tokens [num_tokens: | default = 128] - # Period at which to heartbeat to consul. 
+ # Period at which to heartbeat to consul. 0 = disabled. # CLI flag: -ingester.heartbeat-period [heartbeat_period: | default = 5s] @@ -1581,7 +1581,7 @@ ring: # CLI flag: -ruler.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -ruler.ring.heartbeat-period [heartbeat_period: | default = 5s] @@ -1907,7 +1907,7 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -alertmanager.sharding-ring.heartbeat-period [heartbeat_period: | default = 15s] @@ -5183,7 +5183,7 @@ sharding_ring: # CLI flag: -compactor.ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -compactor.ring.heartbeat-period [heartbeat_period: | default = 5s] @@ -5261,7 +5261,7 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.multi.mirror-timeout [mirror_timeout: | default = 2s] - # Period at which to heartbeat to the ring. + # Period at which to heartbeat to the ring. 0 = disabled. # CLI flag: -store-gateway.sharding-ring.heartbeat-period [heartbeat_period: | default = 15s] diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 36e39221ffe..3af5dd4cd14 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -87,4 +87,11 @@ Currently experimental features are: - `-ruler.ring.heartbeat-timeout=0` - `-alertmanager.sharding-ring.heartbeat-timeout=0` - `-compactor.ring.heartbeat-timeout=0` - - `-store-gateway.sharding-ring.heartbeat-timeout=0` \ No newline at end of file + - `-store-gateway.sharding-ring.heartbeat-timeout=0` +- Disabling ring heartbeats + - `-distributor.ring.heartbeat-period=0` + - `-ingester.heartbeat-period=0` + - `-ruler.ring.heartbeat-period=0` + - `-alertmanager.sharding-ring.heartbeat-period=0` + - `-compactor.ring.heartbeat-period=0` + - `-store-gateway.sharding-ring.heartbeat-period=0` diff --git a/pkg/alertmanager/alertmanager_ring.go b/pkg/alertmanager/alertmanager_ring.go index f0b707a0901..0c60d0f69c5 100644 --- a/pkg/alertmanager/alertmanager_ring.go +++ b/pkg/alertmanager/alertmanager_ring.go @@ -76,7 +76,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix(rfprefix, "alertmanagers/", f) - f.DurationVar(&cfg.HeartbeatPeriod, rfprefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatPeriod, rfprefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, rfprefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring. 
0 = never (timeout disabled).") f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") f.BoolVar(&cfg.ZoneAwarenessEnabled, rfprefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate alerts across different availability zones.") diff --git a/pkg/compactor/compactor_ring.go b/pkg/compactor/compactor_ring.go index d384220a327..285a1fa6bc7 100644 --- a/pkg/compactor/compactor_ring.go +++ b/pkg/compactor/compactor_ring.go @@ -50,7 +50,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") // Wait stability flags. diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index ce70464e342..3655aa99854 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -42,7 +42,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).") // Instance flags diff --git a/pkg/ring/basic_lifecycler.go b/pkg/ring/basic_lifecycler.go index 6c19d72c281..64f5f6002c4 100644 --- a/pkg/ring/basic_lifecycler.go +++ b/pkg/ring/basic_lifecycler.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -182,12 +183,12 @@ func (l *BasicLifecycler) starting(ctx context.Context) error { } func (l *BasicLifecycler) running(ctx context.Context) error { - heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() + heartbeatTickerStop, heartbeatTickerChan := util.NewDisableableTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTickerStop() for { select { - case <-heartbeatTicker.C: + case <-heartbeatTickerChan: l.heartbeat(ctx) case f := <-l.actorChan: @@ -214,13 +215,13 @@ func (l *BasicLifecycler) stopping(runningError error) error { }() // Heartbeat while the stopping delegate function is running. 
- heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() + heartbeatTickerStop, heartbeatTickerChan := util.NewDisableableTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTickerStop() heartbeatLoop: for { select { - case <-heartbeatTicker.C: + case <-heartbeatTickerChan: l.heartbeat(context.Background()) case <-done: break heartbeatLoop @@ -292,8 +293,8 @@ func (l *BasicLifecycler) registerInstance(ctx context.Context) error { } func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Duration) error { - heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() + heartbeatTickerStop, heartbeatTickerChan := util.NewDisableableTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTickerStop() // The first observation will occur after the specified period. level.Info(l.logger).Log("msg", "waiting stable tokens", "ring", l.ringName) @@ -312,7 +313,7 @@ func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Dura level.Info(l.logger).Log("msg", "tokens verification succeeded", "ring", l.ringName) return nil - case <-heartbeatTicker.C: + case <-heartbeatTickerChan: l.heartbeat(ctx) case <-ctx.Done(): diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go index 73584ea0a2b..37897b70ba5 100644 --- a/pkg/ring/lifecycler.go +++ b/pkg/ring/lifecycler.go @@ -17,6 +17,7 @@ import ( "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" @@ -83,7 +84,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag } f.IntVar(&cfg.NumTokens, prefix+"num-tokens", 128, "Number of tokens for each ingester.") - f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.") + f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul. 0 = disabled.") f.DurationVar(&cfg.JoinAfter, prefix+"join-after", 0*time.Second, "Period to wait for a claim from another member; will join automatically after this.") f.DurationVar(&cfg.ObservePeriod, prefix+"observe-period", 0*time.Second, "Observe tokens after generating to resolve collisions. Useful when using gossiping ring.") f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. 
This is to work around race conditions with ingesters exiting and updating the ring.") @@ -392,8 +393,8 @@ func (i *Lifecycler) loop(ctx context.Context) error { autoJoinAfter := time.After(i.cfg.JoinAfter) var observeChan <-chan time.Time = nil - heartbeatTicker := time.NewTicker(i.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() + heartbeatTickerStop, heartbeatTickerChan := util.NewDisableableTicker(i.cfg.HeartbeatPeriod) + defer heartbeatTickerStop() for { select { @@ -442,7 +443,7 @@ func (i *Lifecycler) loop(ctx context.Context) error { observeChan = time.After(i.cfg.ObservePeriod) } - case <-heartbeatTicker.C: + case <-heartbeatTickerChan: consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { level.Error(log.Logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) @@ -469,8 +470,8 @@ func (i *Lifecycler) stopping(runningError error) error { return nil } - heartbeatTicker := time.NewTicker(i.cfg.HeartbeatPeriod) - defer heartbeatTicker.Stop() + heartbeatTickerStop, heartbeatTickerChan := util.NewDisableableTicker(i.cfg.HeartbeatPeriod) + defer heartbeatTickerStop() // Mark ourselved as Leaving so no more samples are send to us. err := i.changeState(context.Background(), LEAVING) @@ -489,7 +490,7 @@ func (i *Lifecycler) stopping(runningError error) error { heartbeatLoop: for { select { - case <-heartbeatTicker.C: + case <-heartbeatTickerChan: consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { level.Error(log.Logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index a3902ed1c4d..e8577e82898 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -56,7 +56,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix("ruler.ring.", "rulers/", f) - f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which rulers are considered unhealthy within the ring. 0 = never (timeout disabled).") // Instance flags diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index f32bf37ccd1..c868c2b789d 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -94,7 +94,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { // Ring flags cfg.KVStore.RegisterFlagsWithPrefix(ringFlagsPrefix, "collectors/", f) - f.DurationVar(&cfg.HeartbeatPeriod, ringFlagsPrefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatPeriod, ringFlagsPrefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, ringFlagsPrefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring. 
0 = never (timeout disabled)."+sharedOptionWithQuerier) f.IntVar(&cfg.ReplicationFactor, ringFlagsPrefix+"replication-factor", 3, "The replication factor to use when sharding blocks."+sharedOptionWithQuerier) f.StringVar(&cfg.TokensFilePath, ringFlagsPrefix+"tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") diff --git a/pkg/util/time.go b/pkg/util/time.go index 58f951a8b3d..8816b1d7d2e 100644 --- a/pkg/util/time.go +++ b/pkg/util/time.go @@ -73,3 +73,14 @@ func DurationWithPositiveJitter(input time.Duration, variancePerc float64) time. return input + time.Duration(jitter) } + +// NewDisableableTicker essentially wraps NewTicker but allows the ticker to be disabled by passing +// zero duration as the interval. Returns a function for stopping the ticker, and the ticker channel. +func NewDisableableTicker(interval time.Duration) (func(), <-chan time.Time) { + if interval == 0 { + return func() {}, nil + } + + tick := time.NewTicker(interval) + return func() { tick.Stop() }, tick.C +} diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go index 700fe55fab5..ab1da4a85bf 100644 --- a/pkg/util/time_test.go +++ b/pkg/util/time_test.go @@ -103,3 +103,31 @@ func TestParseTime(t *testing.T) { assert.Equal(t, TimeToMillis(test.result), ts) } } + +func TestNewDisableableTicker_Enabled(t *testing.T) { + stop, ch := NewDisableableTicker(10 * time.Millisecond) + defer stop() + + time.Sleep(100 * time.Millisecond) + + select { + case <-ch: + break + default: + t.Error("ticker should have ticked when enabled") + } +} + +func TestNewDisableableTicker_Disabled(t *testing.T) { + stop, ch := NewDisableableTicker(0) + defer stop() + + time.Sleep(100 * time.Millisecond) + + select { + case <-ch: + t.Error("ticker should not have ticked when disabled") + default: + break + } +} From bba2774379da7b9b4ae856cf4b72e8bbb3327934 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 9 Jul 2021 14:22:28 +0200 Subject: [PATCH 011/137] Expose configuration of memberlist packet compression. (#4346) * Expose configuration of memberlist packet compression. Allows manually specifying whether memberlist should compress packets via a new configuration flag: `-memberlist.enable-compression`. This typically has little benefit for Cortex, as the ring state messages are already compressed with Snappy, the second layer of compression does not achieve any additional saving. It's not clear cut whether there might still be some benefit for internal memberlist messages; this needs to be evaluated in a environment of some reasonable scale. Signed-off-by: Steve Simpson * Review comments. Signed-off-by: Steve Simpson * Review comments. Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- CHANGELOG.md | 1 + docs/configuration/config-file-reference.md | 5 ++++ ...tegration_memberlist_single_binary_test.go | 29 ++++++++++++------- pkg/ring/kv/memberlist/memberlist_client.go | 3 ++ 4 files changed, 27 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64066a61bf7..2bc8af553da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ * `-compactor.ring.heartbeat-period` * `-store-gateway.sharding-ring.heartbeat-period` * [ENHANCEMENT] Memberlist: optimized receive path for processing ring state updates, to help reduce CPU utilization in large clusters. #4345 +* [ENHANCEMENT] Memberlist: expose configuration of memberlist packet compression via `-memberlist.compression=enabled`. 
#4346 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 22859a244e3..0f70a3f3e59 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -3796,6 +3796,11 @@ The `memberlist_config` configures the Gossip memberlist. # CLI flag: -memberlist.dead-node-reclaim-time [dead_node_reclaim_time: | default = 0s] +# Enable message compression. This can be used to reduce bandwidth usage at the +# cost of slightly more CPU utilization. +# CLI flag: -memberlist.compression-enabled +[compression_enabled: | default = true] + # Other cluster members to join. Can be specified multiple times. It can be an # IP, hostname or an entry specified in the DNS Service Discovery format (see # https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery diff --git a/integration/integration_memberlist_single_binary_test.go b/integration/integration_memberlist_single_binary_test.go index 53c1a945fd0..f4cd7b79140 100644 --- a/integration/integration_memberlist_single_binary_test.go +++ b/integration/integration_memberlist_single_binary_test.go @@ -23,15 +23,21 @@ import ( func TestSingleBinaryWithMemberlist(t *testing.T) { t.Run("default", func(t *testing.T) { - testSingleBinaryEnv(t, false) + testSingleBinaryEnv(t, false, nil) }) t.Run("tls", func(t *testing.T) { - testSingleBinaryEnv(t, true) + testSingleBinaryEnv(t, true, nil) + }) + + t.Run("compression-disabled", func(t *testing.T) { + testSingleBinaryEnv(t, false, map[string]string{ + "-memberlist.compression-enabled": "false", + }) }) } -func testSingleBinaryEnv(t *testing.T, tlsEnabled bool) { +func testSingleBinaryEnv(t *testing.T, tlsEnabled bool, flags map[string]string) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) defer s.Close() @@ -65,13 +71,13 @@ func testSingleBinaryEnv(t *testing.T, tlsEnabled bool) { filepath.Join(s.SharedDir(), clientKeyFile), )) - cortex1 = newSingleBinary("cortex-1", memberlistDNS, "") - cortex2 = newSingleBinary("cortex-2", memberlistDNS, networkName+"-cortex-1:8000") - cortex3 = newSingleBinary("cortex-3", memberlistDNS, networkName+"-cortex-1:8000") + cortex1 = newSingleBinary("cortex-1", memberlistDNS, "", flags) + cortex2 = newSingleBinary("cortex-2", memberlistDNS, networkName+"-cortex-1:8000", flags) + cortex3 = newSingleBinary("cortex-3", memberlistDNS, networkName+"-cortex-1:8000", flags) } else { - cortex1 = newSingleBinary("cortex-1", "", "") - cortex2 = newSingleBinary("cortex-2", "", networkName+"-cortex-1:8000") - cortex3 = newSingleBinary("cortex-3", "", networkName+"-cortex-1:8000") + cortex1 = newSingleBinary("cortex-1", "", "", flags) + cortex2 = newSingleBinary("cortex-2", "", networkName+"-cortex-1:8000", flags) + cortex3 = newSingleBinary("cortex-3", "", networkName+"-cortex-1:8000", flags) } // start cortex-1 first, as cortex-2 and cortex-3 both connect to cortex-1 @@ -109,7 +115,7 @@ func testSingleBinaryEnv(t *testing.T, tlsEnabled bool) { require.NoError(t, s.Stop(cortex3)) } -func newSingleBinary(name string, servername string, join string) *e2ecortex.CortexService { +func newSingleBinary(name string, servername string, join string, testFlags map[string]string) *e2ecortex.CortexService { 
flags := map[string]string{ "-ingester.final-sleep": "0s", "-ingester.join-after": "0s", // join quickly @@ -132,6 +138,7 @@ func newSingleBinary(name string, servername string, join string) *e2ecortex.Cor mergeFlags( ChunksStorageFlags(), flags, + testFlags, getTLSFlagsWithPrefix("memberlist", servername, servername == ""), ), "", @@ -170,7 +177,7 @@ func TestSingleBinaryWithMemberlistScaling(t *testing.T) { if i > 0 { join = e2e.NetworkContainerHostPort(networkName, "cortex-1", 8000) } - c := newSingleBinary(name, "", join) + c := newSingleBinary(name, "", join, nil) require.NoError(t, s.StartAndWaitReady(c)) instances = append(instances, c) } diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index 7e45868bd96..79432d4668d 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -136,6 +136,7 @@ type KVConfig struct { GossipNodes int `yaml:"gossip_nodes"` GossipToTheDeadTime time.Duration `yaml:"gossip_to_dead_nodes_time"` DeadNodeReclaimTime time.Duration `yaml:"dead_node_reclaim_time"` + EnableCompression bool `yaml:"compression_enabled"` // List of members to join JoinMembers flagext.StringSlice `yaml:"join_members"` @@ -187,6 +188,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.DurationVar(&cfg.GossipToTheDeadTime, prefix+"memberlist.gossip-to-dead-nodes-time", mlDefaults.GossipToTheDeadTime, "How long to keep gossiping to dead nodes, to give them chance to refute their death.") f.DurationVar(&cfg.DeadNodeReclaimTime, prefix+"memberlist.dead-node-reclaim-time", mlDefaults.DeadNodeReclaimTime, "How soon can dead node's name be reclaimed with new address. 0 to disable.") f.IntVar(&cfg.MessageHistoryBufferBytes, prefix+"memberlist.message-history-buffer-bytes", 0, "How much space to use for keeping received and sent messages in memory for troubleshooting (two buffers). 0 to disable.") + f.BoolVar(&cfg.EnableCompression, prefix+"memberlist.compression-enabled", mlDefaults.EnableCompression, "Enable message compression. This can be used to reduce bandwidth usage at the cost of slightly more CPU utilization.") cfg.TCPTransport.RegisterFlags(f, prefix) } @@ -380,6 +382,7 @@ func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { mlCfg.GossipNodes = m.cfg.GossipNodes mlCfg.GossipToTheDeadTime = m.cfg.GossipToTheDeadTime mlCfg.DeadNodeReclaimTime = m.cfg.DeadNodeReclaimTime + mlCfg.EnableCompression = m.cfg.EnableCompression if m.cfg.NodeName != "" { mlCfg.Name = m.cfg.NodeName From bbdcfb31ca9f1f5e1d73dff561a1a231fd636beb Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 9 Jul 2021 14:29:14 +0200 Subject: [PATCH 012/137] Attempt at making SyncOnRingTopologyChanges test more reliable. (#4348) It was only waiting one second for the second sync to complete, which is probably too harsh a deadline than necessary for overloaded systems. Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- pkg/alertmanager/multitenant_test.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index dc60301eb8e..d8af49e996a 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -1325,18 +1325,15 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { return ringDesc, true, nil })) - // Assert if we expected a sync or not. + // Assert if we expected an additional sync or not. 
+ expectedSyncs := 1 if tt.expected { - test.Poll(t, time.Second, float64(2), func() interface{} { - metrics := regs.BuildMetricFamiliesPerUser() - return metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total") - }) - } else { - time.Sleep(250 * time.Millisecond) - - metrics := regs.BuildMetricFamiliesPerUser() - assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total")) + expectedSyncs++ } + test.Poll(t, 5*time.Second, float64(expectedSyncs), func() interface{} { + metrics := regs.BuildMetricFamiliesPerUser() + return metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total") + }) }) } } From 41c6676d501f2dbfd92c49d0f1a68a2bcb05c576 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 9 Jul 2021 14:34:06 +0200 Subject: [PATCH 013/137] Fix StateReplicationWithSharding_InitialSyncFromPeers flaking. (#4349) The test is writing a single silence and checking a metric which indicates whether replicating the silence has been attempted yet. This is so we can check later on that no replication activity occurs. The assertions later on in the test are passing, but the first one is not, indicating that the replication doesn't trigger early enough. This makes sense because the replication is not synchronous with the writing of the silence. Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- pkg/alertmanager/multitenant_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index d8af49e996a..285914b99b6 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -1817,7 +1817,14 @@ func TestAlertmanager_StateReplicationWithSharding_InitialSyncFromPeers(t *testi { metrics := registries.BuildMetricFamiliesPerUser() assert.Equal(t, float64(1), metrics.GetSumOfGauges("cortex_alertmanager_silences")) - assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_alertmanager_state_replication_total")) + } + // 2.c. Wait for the silence replication to be attempted; note this is asynchronous. 
+ { + test.Poll(t, 5*time.Second, float64(1), func() interface{} { + metrics := registries.BuildMetricFamiliesPerUser() + return metrics.GetSumOfCounters("cortex_alertmanager_state_replication_total") + }) + metrics := registries.BuildMetricFamiliesPerUser() assert.Equal(t, float64(0), metrics.GetSumOfCounters("cortex_alertmanager_state_replication_failed_total")) } From 02de2c9717c884fb43440556b8279e5e0c1cda71 Mon Sep 17 00:00:00 2001 From: Ilan <34989469+ilangofman@users.noreply.github.com> Date: Fri, 9 Jul 2021 09:41:22 -0400 Subject: [PATCH 014/137] Proposal for time series deletion with block storage (#4274) * Add proposal document Signed-off-by: Gofman Signed-off-by: ilangofman * Minor text modifications Signed-off-by: ilangofman * Implement requested changes to the proposal Signed-off-by: ilangofman * Fix mention of Compactor instead of purger in proposal Signed-off-by: ilangofman * Fixed wording and spelling in proposal Signed-off-by: ilangofman * Update the cache invalidation method Signed-off-by: ilangofman * Fix wording on cache invalidation section Signed-off-by: ilangofman * Minor wording additions Signed-off-by: ilangofman * Remove white-noise from text Signed-off-by: ilangofman * Remove the deleting state and change cache invalidation Signed-off-by: ilangofman * Add deleted state and update cache invalidation Signed-off-by: ilangofman * Add one word to clear things up Signed-off-by: ilangofman * update api limits section Signed-off-by: ilangofman * ran clean white noise Signed-off-by: ilangofman Signed-off-by: Alvin Lin --- .../block-storage-time-series-deletion.md | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 docs/proposals/block-storage-time-series-deletion.md diff --git a/docs/proposals/block-storage-time-series-deletion.md b/docs/proposals/block-storage-time-series-deletion.md new file mode 100644 index 00000000000..20d7a346e52 --- /dev/null +++ b/docs/proposals/block-storage-time-series-deletion.md @@ -0,0 +1,255 @@ +--- +title: "Time Series Deletion from Blocks Storage" +linkTitle: "Time Series Deletion from Blocks Storage" +weight: 1 +slug: block-storage-time-series-deletion +--- + +- Author: [Ilan Gofman](https://github.com/ilangofman) +- Date: June 2021 +- Status: Proposal + +## Problem + +Currently, Cortex only implements a time series deletion API for chunk storage. We present a design for implementing time series deletion with block storage. We would like to have the same API for deleting series as currently implemented in Prometheus and in Cortex with chunk storage. + + +This can be very important for users to have as confidential or accidental data might have been incorrectly pushed and needs to be removed. As well as potentially removing high cardinality data that is causing inefficient queries. + +## Related works + +As previously mentioned, the deletion feature is already implemented with chunk storage. The main functionality is implemented through the purger service. It accepts requests for deletion and processes them. At first, when a deletion request is made, a tombstone is created. This is used to filter out the data for queries. After some time, a deletion plan is executed where the data is permanently removed from chunk storage. 
+ +Can find more info here: + +- [Cortex documentation for chunk store deletion](https://cortexmetrics.io/docs/guides/deleting-series/) +- [Chunk deletion proposal](https://docs.google.com/document/d/1PeKwP3aGo3xVrR-2qJdoFdzTJxT8FcAbLm2ew_6UQyQ/edit) + + + +## Background on current storage + +With a block-storage configuration, Cortex stores data that could be potentially deleted by a user in: + +- Object store (GCS, S3, etc..) for long term storage of blocks +- Ingesters for more recent data that should be eventually transferred to the object store +- Cache + - Index cache + - Metadata cache + - Chunks cache (stores the potentially to be deleted data) + - Query results cache (stores the potentially to be deleted data) +- Compactor during the compaction process +- Store-gateway + + +## Proposal + +The deletion will not happen right away. Initially, the data will be filtered out from queries using tombstones and will be deleted afterward. This will allow the user some time to cancel the delete request. + +### API Endpoints + +The existing purger service will be used to process the incoming requests for deletion. The API will follow the same structure as the chunk storage endpoints for deletion, which is also based on the Prometheus deletion API. + +This will enable the following endpoints for Cortex when using block storage: + +`POST /api/v1/admin/tsdb/delete_series` - Accepts [Prometheus style delete request](https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series) for deleting series. + +Parameters: + +- `start=` + - Optional. If not provided, will be set to minimum possible time. +- `end= ` + - Optional. If not provided, will be set to maximum possible time (time when request was made). End time cannot be greater than the current UTC time. +- `match[]=` + - Cannot be empty, must contain at least one label matcher argument. + + +`POST /api/v1/admin/tsdb/cancel_delete_request` - To cancel a request if it has not been processed yet for permanent deletion. This can only be done before the `-purger.delete-request-cancel-period` has passed. +Parameters: + +- `request_id` + +`GET /api/v1/admin/tsdb/delete_series` - Get all delete requests id’s and their current status. + +Prometheus also implements a [clean_tombstones](https://prometheus.io/docs/prometheus/latest/querying/api/#clean-tombstones) API which is not included in this proposal. The tombstones will be deleted automatically once the permanent deletion has taken place which is described in the section below. By default, this should take approximately 24 hours. + +### Deletion Lifecycle + +The deletion request lifecycle can follow these 3 states: + +1. Pending - Tombstone file is created. During this state, the queriers will be performing query time filtering. The initial time period configured by `-purger.delete-request-cancel-period`, no data will be deleted. Once this period is over, permanent deletion processing will begin and the request is no longer cancellable. +2. Processed - All requested data has been deleted. Initially, will still need to do query time filtering while waiting for the bucket index and store-gateway to pick up the new blocks. Once that period has passed, will no longer require any query time filtering. +3. Deleted - The deletion request was cancelled. A grace period configured by `-purger.delete-request-cancel-period` will allow the user some time to cancel the deletion request if it was made by mistake. The request is no longer cancelable after this period has passed. 
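+
+As a rough illustration of the flow described above (a request to the proposed endpoint creates a tombstone in the `Pending` state), here is a minimal Go sketch. The host, port, tenant ID and series selector are placeholders, not values defined by this proposal:
+
+```
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Parameters follow the Prometheus-style delete API described in the API Endpoints section.
+	params := url.Values{}
+	params.Add("match[]", `{job="example"}`)    // required: at least one series selector (placeholder)
+	params.Add("start", "2021-07-01T00:00:00Z") // optional: defaults to minimum possible time
+	params.Add("end", "2021-07-02T00:00:00Z")   // optional: defaults to the time of the request
+
+	u := "http://localhost:9009/api/v1/admin/tsdb/delete_series?" + params.Encode()
+	req, err := http.NewRequest(http.MethodPost, u, nil)
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("X-Scope-OrgID", "tenant-1") // tenant header; value is a placeholder
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println("delete request status:", resp.Status)
+}
+```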
+ + + +### Filtering data during queries while not yet deleted: + +Once a deletion request is received, a tombstone entry will be created. The object store such as S3, GCS, Azure storage, can be used to store all the deletion requests. See the section below for more detail on how the tombstones will be stored. Using the tombstones, the querier will be able to filter the to-be-deleted data initially. If a cancel delete request is made, then the tombstone file will be deleted. In addition, the existing cache will be invalidated using cache generation numbers, which are described in the later sections. + +The compactor's _BlocksCleaner_ service will scan for new tombstone files and will update the bucket-index with the tombstone information regarding the deletion requests. This will enable the querier to periodically check the bucket index if there are any new tombstone files that are required to be used for filtering. One drawback of this approach is the time it could take to start filtering the data. Since the compactor will update the bucket index with the new tombstones every `-compactor.cleanup-interval` (default 15 min). Then the cached bucket index is refreshed in the querier every `-blocks-storage.bucket-store.sync-interval` (default 15 min). Potentially could take almost 30 min for queriers to start filtering deleted data when using the default values. If the information requested for deletion is confidential/classified, the time delay is something that the user should be aware of, in addition to the time that the data has already been in Cortex. + +An additional thing to consider is that this would mean that the bucket-index would have to be enabled for this API to work. Since the plan is to make to the bucket-index mandatory in the future for block storage, this shouldn't be an issue. + +Similar to the chunk storage deletion implementation, the initial filtering of the deleted data will be done inside the Querier. This will allow filtering the data read from both the store gateway and the ingester. This functionality already exists for the chunk storage implementation. By implementing it in the querier, this would mean that the ruler will be supported too (ruler internally runs the querier). + +#### Storing tombstones in object store + + +The Purger will write the new tombstone entries in a separate folder called `tombstones` in the object store (e.g. S3 bucket) in the respective tenant folder. Each tombstone can have a separate JSON file outlining all the necessary information about the deletion request such as the parameters passed in the request, as well as some meta-data such as the creation date of the file. The name of the file can be a hash of the API parameters (start, end, markers). This way if a user calls the API twice by accident with the same parameters, it will only create one tombstone. To keep track of the request state, filename extensions can be used. This will allow the tombstone files to be immutable. The 3 different file extensions will be `pending, processed, deleted`. Each time the deletion request moves to a new state, a new file will be added with the same deletion information but a different extension to indicate the new state. The file containing the previous state will be deleted once the new one is created. If a deletion request is cancelled, then a tombstone file with the `.deleted` filename extension will be created. 
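+
+As a minimal sketch of the naming scheme described above (the concrete hash function and exact key layout are illustrative choices, not mandated by this proposal):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// tombstoneObjectKey derives the per-state object key for a deletion request.
+// Hashing the request parameters means an accidental duplicate request maps to
+// the same tombstone instead of creating a second one.
+func tombstoneObjectKey(userID string, startMillis, endMillis int64, matchers []string, state string) string {
+	h := sha256.Sum256([]byte(fmt.Sprintf("%d:%d:%s", startMillis, endMillis, strings.Join(matchers, ";"))))
+	requestID := hex.EncodeToString(h[:])
+	return fmt.Sprintf("%s/tombstones/%s.json.%s", userID, requestID, state)
+}
+
+func main() {
+	matchers := []string{`{job="example"}`} // placeholder selector
+	fmt.Println(tombstoneObjectKey("tenant-1", 0, 1625097600000, matchers, "pending"))
+}
+```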
+ +When it is determined that the request should move to the next state, then it will first write a new file containing the tombstone information to the object store. The information inside the file will be the same except the `stateCreationTime`, which is replaced with the current timestamp. The extension of the new file will be different to reflect the new state. If the new file is successfully written, the file with the previous state is deleted. If the write of the new file fails, then the previous file is not going to be deleted. Next time the service runs to check the state of each tombstone, it will retry creating the new file with the updated state. If the write is successful but the deletion of the old file is unsuccessful then there will be 2 tombstone files with the same filename but different extension. When `BlocksCleaner` writes the tombstones to the bucket index, the compactor will check for duplicate tombstone files but with different extensions. It will use the tombstone with the most recently updated state and try to delete the file with the older state. There could be a scenario where there are two files with the same request ID but different extensions: {`.pending`, `.processed`} or {`.pending`, `.deleted`}. In this case, the `.processed` or `.deleted ` file will be selected as it is always the later state compared to the `pending` state. + +The tombstone will be stored in a single JSON file per request and state: + +- `//tombstones/.json.` + + +The schema of the JSON file is: + + +``` +{ + "requestId": , + "startTime": , + "endTime": , + "requestCreationTime": , + "stateCreationTime": , + "matchers": [ + "", + .., + "" + ] + }, + "userID": , +} +``` + + +Pros: +- Allows deletion and un-delete to be done in a single operation. + +Cons: + +- Negative impact on query performance when there are active tombstones. As in the chunk storage implementation, all the series will have to be compared to the matchers contained in the active tombstone files. The impact on performance should be the same as the deletion would have with chunk storage. + +- With the default config, potential 30 minute wait for the data to begin filtering if using the default configuration. + +#### Invalidating cache + +Using block store, the different caches available are: +- Index cache +- Metadata cache +- Chunks cache (stores the potentially to be deleted chunks of data) +- Query results cache (stores the potentially to be deleted data) + +There are two potential caches that could contain deleted data, the chunks cache, and the query results cache. Using the tombstones, the queriers filter out the data received from the ingesters and store-gateway. The cache not being processed through the querier needs to be invalidated to prevent deleted data from coming up in queries. + +Firstly, the query results cache needs to be invalidated for each new delete request or a cancellation of one. This can be accomplished by utilizing cache generation numbers. For each tenant, their cache is prefixed with a cache generation number. When the query front-end discovers a cache generation number that is greater than the previous generation number, then it knows to invalidate the query results cache. However, the cache can only be invalidated once the queriers have loaded the tombstones from the bucket index and have begun filtering the data. Otherwise, to-be deleted data might show up in queries and be cached again. 
One of the way to guarantee that all the queriers are using the new tombstones is to wait until the bucket index staleness period has passed from the time the tombstones have been written to the bucket index. The staleness period can be configured using the following flag: `-blocks-storage.bucket-store.bucket-index.max-stale-period`. We can use the bucket index staleness period as the delay to wait before the cache generation number is increased. A query will fail inside the querier, if the bucket index last update is older the staleness period. Once this period is over, all the queriers should have the updated tombstones and the query results cache can be invalidated. Here is the proposed method for accomplishing this: + + +- The cache generation number will be a timestamp. The default value will be 0. +- The bucket index will store the cache generation number. The query front-end will periodically fetch the bucket index. +- Inside the compactor, the _BlocksCleaner_ will load the tombstones from object store and update the bucket index accordingly. It will calculate the cache generation number by iterating through all the tombstones and their respective times (next bullet point) and selecting the maximum timestamp that is less than (current time minus `-blocks-storage.bucket-store.bucket-index.max-stale-period`). This would mean that if a deletion request is made or cancelled, the compactor will only update the cache generation number once the staleness period is over, ensuring that all queriers have the updated tombstones. +- For requests in a pending or processed state, the `requestCreationTime` will be used when comparing the maximum timestamps. If a request is in a deleted state, it will use the `stateCreationTime` for comparing the timestamps. This means that the cache gets invalidated only once it has been created or deleted, and the bucket index staleness period has passed. The cache will not be invalidated again when a request advances from pending to processed state. +- The query front-end will fetch the cache generation number from the bucket index. The query front end will compare it to the current cache generation number stored in the front-end. If the cache generation number from the front-end is less than the one from bucket index, then the cache is invalidated. + +In regards to the chunks cache, since it is retrieved from the store gateway and passed to the querier, it will be filtered out like the rest of the time series data in the querier using the tombstones, with the mechanism described in the previous section. + +### Permanently deleting the data + +The proposed approach is to perform the deletions from the compactor. A new background service inside the compactor called _DeletedSeriesCleaner_ can be created and is responsible for executing the deletion. + +#### Processing + + +This will happen after a grace period has passed once the API request has been made. By default this should be 24 hours. A background task can be created to process the permanent deletion of time series. This background task can be executed each hour. + +To delete the data from the blocks, the same logic as the [Bucket Rewrite Tool](https://thanos.io/tip/components/tools.md/#bucket-rewrite +) from Thanos can be leveraged. This tool does the following: `tools bucket rewrite rewrites chosen blocks in the bucket, while deleting or modifying series`. The tool itself is a CLI tool that we won’t be using, but instead we can utilize the logic inside it. 
For more information about the way this tool runs, please see the code [here](https://github.com/thanos-io/thanos/blob/d8b21e708bee6d19f46ca32b158b0509ca9b7fed/cmd/thanos/tools_bucket.go#L809). + +The compactor’s _DeletedSeriesCleaner_ will apply this logic on individual blocks and each time it is run, it creates a new block without the data that matched the deletion request. The original individual blocks containing the data that was requested to be deleted, need to be marked for deletion by the compactor. + +While deleting the data permanently from the block storage, the `meta.json` files will be used to keep track of the deletion progress. Inside each `meta.json` file, we will add a new field called `tombstonesFiltered`. This will store an array of deletion request id's that were used to create this block. Once the rewrite logic is applied to a block, the new block's `meta.json` file will append the deletion request id(s) used for the rewrite operation inside this field. This will let the _DeletedSeriesCleaner_ know that this block has already processed the particular deletions requests listed in this field. Assuming that the deletion requests are quite rare, the size of the meta.json files should remain small. + +The _DeletedSeriesCleaner_ can iterate through all the blocks that the deletion request could apply to. For each of these blocks, if the deletion request ID isn't inside the meta.json `tombstonesFiltered` field, then the compactor can apply the rewrite logic to this block. If there are multiple tombstones that are currently being processing for deletions and apply to a particular block, then the _DeletedSeriesCleaner_ will process both at the same time to prevent additional blocks from being created. If after iterating through all the blocks, it doesn’t find any such blocks requiring deletion, then the `Pending` state is complete and the request progresses to the `Processed` state. + +One important thing to note regarding this rewrite tool is that it should not be used at the same time as when another compactor is touching a block. If the tool is run at the same time as compaction on a particular block, it can cause overlap and the data marked for deletion can already be part of the compacted block. To mitigate such issues, these are some of the proposed solutions: + +Option 1: Only apply the deletion once the blocks are in the final state of compaction. + +Pros: +- Simpler implementation as everything is contained within the DeletedSeriesCleaner. + +Cons: +- Might have to wait for a longer period of time for the compaction to be finished. + - This would mean the earliest time to be able to run the deletion would be once the last time from the block_ranges in the [compactor_config](https://cortexmetrics.io/docs/blocks-storage/compactor/#compactor-configuration) has passed. By default this value is 24 hours, so only once 24 hours have passed and the new compacted blocks have been created, then the rewrite can be safely run. + + + + +Option 2: For blocks that still need to be compacted further after the deletion request cancel period is over, the deletion logic can be applied before the blocks are compacted. This will generate a new block which can then be used instead for compaction with other blocks. + +Pros: +- The deletion can be applied earlier than the previous options. + - Only applies if the deletion request cancel period is less than the last time interval for compaction is. +Cons: +- Added coupling between the compaction and the DeletedSeriesCleaner. 
+- Might block compaction for a short time while doing the deletion. + + + +Once all the applicable blocks have been rewritten without the deleted data, the deletion request state moves to the `Processed` state. Once in this state, the queriers will still have to perform query time filtering using the tombstones until the old blocks that were marked for deletion are no longer queried by the queriers. This will mean that the query time filtering will last for an additional length of `-compactor.deletion-delay + -compactor.cleanup-interval + -blocks-storage.bucket-store.sync-interval` in the `Processed` state. Once that time period has passed, the queriers should no longer be querying any of the old blocks that were marked for deletion. The tombstones will no longer be used after this. + + +#### Cancelled Delete Requests + +If a request was successfully cancelled, then a tombstone file with a `.deleted` extension is created. This is done to help ensure that the cache generation number is updated and the query results cache is invalidated. The compactor's blocks cleaner can take care of cleaning up `.deleted` tombstones after a period of time once they are no longer required for cache invalidation. This can be done after 10 times the bucket index max staleness time period has passed. Before removing the file from the object store, the current cache generation number must be greater than or equal to the time when the tombstone was cancelled. + +#### Handling failed/unfinished delete jobs: + +Deletions will be completed and the tombstones will be deleted only when the DeletedSeriesCleaner iterates over all blocks that match the time interval and confirms that they have been re-written without the deleted data. Otherwise, it will keep iterating over the blocks and process the blocks that haven't been rewritten according to the information in the `meta.json` file. In case of any failure that causes the deletion to stop, any unfinished deletions will be resumed once the service is restarted. If the block rewrite was not completed on a particular block, then the original block will not be marked for deletion. The compactor will continue to iterate over the blocks and process the block again. + + +#### Tenant Deletion API + +If a request is made to delete a tenant, then all the tombstones will be deleted for that user. + + +## Current Open Questions: + +- If the start and end times are very far apart, it might result in a lot of the data being re-written. Since we create a new block without the deleted data and mark the old one for deletion, there may be a period of time with lots of extra blocks and space used for large deletion queries. +- There will be a delay between the deletion request and the deleted data being filtered during queries. + - In Prometheus, there is no delay. + - One way to filter out the data immediately is to load the tombstones during query time, but this would have a negative performance impact. +- Adding limits to the API such as: + - Max number of deletion requests allowed in the last 24 hours for a given tenant. + - Max number of pending tombstones for a given tenant. + + +## Alternatives Considered + + +#### Adding a Pre-processing State + +The process of permanently deleting the data can be separated into two stages: pre-processing and processing. + +Pre-processing will begin after the `-purger.delete-request-cancel-period` has passed since the API request was made. The deletion request will move to a new state called `BuildingPlan`.
The compactor will outline all the blocks that may contain data to be deleted. For each separate block that the deletion may be applicable to, the compactor will begin the process by adding a series deletion marker inside the series-deletion-marker.json file. The JSON file will contain an array of the deletion request IDs that need to be applied to the block, which makes it possible to handle the situation where multiple tombstones are applicable to a particular block. Then, during the processing step, instead of checking the meta.json file, we only need to check whether a marker file exists with a specific deletion request ID. If the marker file exists, then we apply the rewrite logic. + +#### Alternative Permanent Deletion Processing + +For processing the actual deletions, an alternative approach is to not wait until the final compaction has been completed, but instead filter out the data during compaction. If the data is marked to be deleted, then don’t include it in the new, bigger block during compaction. For the remaining blocks where the data wasn’t filtered during compaction, the deletion can be done the same way as in the previous section. + +Pros: + +- The deletion can happen sooner. +- The rewrite tool creates additional blocks. By filtering the metrics during compaction, the intermediary re-written blocks are avoided. + +Cons: + +- A more complicated implementation, requiring more logic to be added to the compactor +- Slower compaction if it needs to filter all the data +- Need to manage which blocks should be deleted with the rewrite vs which blocks already had data filtered during compaction. +- Would need to run the rewrite logic during and outside of compaction, because some blocks that might need to be deleted are already in the final compaction state. So that would mean the deletion functionality has to be implemented in multiple places. +- Won’t be leveraging the rewrite tool from Thanos for all deletions, so potentially more work is duplicated + From b0365b72f067ffa47d8564b29f9fe8cb348d09c9 Mon Sep 17 00:00:00 2001 From: Albert Date: Mon, 12 Jul 2021 10:21:05 -0700 Subject: [PATCH 015/137] update changelog Signed-off-by: Albert Signed-off-by: Alvin Lin --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bc8af553da..e966e42fa76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ * [ENHANCEMENT] Memberlist: optimized receive path for processing ring state updates, to help reduce CPU utilization in large clusters. #4345 * [ENHANCEMENT] Memberlist: expose configuration of memberlist packet compression via `-memberlist.compression=enabled`. #4346 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. 
#4336 -* [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4318 +* [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4357 ## 1.10.0-rc.0 / 2021-06-28 From 205ed16945eedffaedcce8bab53a6f5b30eb6e6d Mon Sep 17 00:00:00 2001 From: Albert Date: Tue, 13 Jul 2021 10:23:03 -0700 Subject: [PATCH 016/137] Add unit tests for shuffle sharding planner Signed-off-by: Albert Signed-off-by: Alvin Lin --- .../shuffle_sharding_grouper_test.go | 2 +- .../shuffle_sharding_planner_test.go | 142 ++++++++++++++++++ 2 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 pkg/compactor/shuffle_sharding_planner_test.go diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index 5f6429dbb78..7e553fbc6e2 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -12,7 +12,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" ) -func TestTimeShardingGrouper_Groups(t *testing.T) { +func TestShuffleShardingGrouper_Groups(t *testing.T) { block1ulid := ulid.MustNew(1, nil) block2ulid := ulid.MustNew(2, nil) block3ulid := ulid.MustNew(3, nil) diff --git a/pkg/compactor/shuffle_sharding_planner_test.go b/pkg/compactor/shuffle_sharding_planner_test.go new file mode 100644 index 00000000000..fdf63c9207c --- /dev/null +++ b/pkg/compactor/shuffle_sharding_planner_test.go @@ -0,0 +1,142 @@ +package compactor + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/prometheus/tsdb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/block/metadata" +) + +func TestShuffleShardingPlanner_Plan(t *testing.T) { + block1ulid := ulid.MustNew(1, nil) + block2ulid := ulid.MustNew(2, nil) + + tests := map[string]struct { + ranges []int64 + blocks []*metadata.Meta + expected []*metadata.Meta + expectedErr error + }{ + "test basic plan": { + ranges: []int64{2 * time.Hour.Milliseconds()}, + blocks: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + }, + expected: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 1 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + }, + }, + "test blocks outside largest range smaller min time after": { + ranges: []int64{2 * time.Hour.Milliseconds()}, + blocks: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 2 * time.Hour.Milliseconds(), + MaxTime: 4 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 0 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + }, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds()), + }, + "test blocks outside largest range 1": { + ranges: []int64{2 
* time.Hour.Milliseconds()}, + blocks: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 0 * time.Hour.Milliseconds(), + MaxTime: 4 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 0 * time.Hour.Milliseconds(), + MaxTime: 4 * time.Hour.Milliseconds(), + }, + }, + }, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block1ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), + }, + "test blocks outside largest range 2": { + ranges: []int64{2 * time.Hour.Milliseconds()}, + blocks: []*metadata.Meta{ + { + BlockMeta: tsdb.BlockMeta{ + ULID: block1ulid, + MinTime: 0 * time.Hour.Milliseconds(), + MaxTime: 2 * time.Hour.Milliseconds(), + }, + }, + { + BlockMeta: tsdb.BlockMeta{ + ULID: block2ulid, + MinTime: 0 * time.Hour.Milliseconds(), + MaxTime: 4 * time.Hour.Milliseconds(), + }, + }, + }, + expectedErr: fmt.Errorf("block %s with time range %d:%d is outside the largest expected range %d:%d", block2ulid.String(), 0*time.Hour.Milliseconds(), 4*time.Hour.Milliseconds(), 0*time.Hour.Milliseconds(), 2*time.Hour.Milliseconds()), + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + p := NewShuffleShardingPlanner(nil, + testData.ranges) + actual, err := p.Plan(context.Background(), testData.blocks) + + if testData.expectedErr != nil { + assert.Equal(t, err, testData.expectedErr) + } else { + require.NoError(t, err) + } + + require.Len(t, actual, len(testData.expected)) + + for idx, expectedMeta := range testData.expected { + assert.Equal(t, expectedMeta.ULID, actual[idx].ULID) + } + }) + } +} From 65e3e5d6ba70737a617e5f0c006a9004aa794616 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Mon, 12 Jul 2021 10:35:57 +0200 Subject: [PATCH 017/137] Fix integration TestAlertmanagerSharding flaking. (#4353) This change will make the test reliable until I can spend more time investigating. It seems that posting alerts is not fully synchronous as was expected when writing the test, so the test can be fixed by waiting for all instances to receive the alert. Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- integration/alertmanager_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/integration/alertmanager_test.go b/integration/alertmanager_test.go index 28ad75f6a29..79a86527c5c 100644 --- a/integration/alertmanager_test.go +++ b/integration/alertmanager_test.go @@ -531,6 +531,12 @@ func TestAlertmanagerSharding(t *testing.T) { require.NoError(t, err) err = c3.SendAlertToAlermanager(context.Background(), alert(3, 2)) require.NoError(t, err) + + // Wait for the alerts to be received by every replica. + require.NoError(t, alertmanagers.WaitSumMetricsWithOptions( + e2e.Equals(float64(3*testCfg.replicationFactor)), + []string{"cortex_alertmanager_alerts_received_total"}, + e2e.SkipMissingMetrics)) } // Endpoint: GET /v1/alerts From d5dc01c1ddd3aba2d6e85ddc7548076076ef220e Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Tue, 13 Jul 2021 11:34:10 +0200 Subject: [PATCH 018/137] Fix integration TestSingleBinaryWithMemberlistScaling flaking. (#4361) This appears to be highlighting an issue - raised as #4360. This change just stops the test flaking until it can be fixed. 
Signed-off-by: Steve Simpson Signed-off-by: Alvin Lin --- integration/integration_memberlist_single_binary_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/integration/integration_memberlist_single_binary_test.go b/integration/integration_memberlist_single_binary_test.go index f4cd7b79140..ee62cd2f238 100644 --- a/integration/integration_memberlist_single_binary_test.go +++ b/integration/integration_memberlist_single_binary_test.go @@ -197,6 +197,13 @@ func TestSingleBinaryWithMemberlistScaling(t *testing.T) { c := instances[i] instances = instances[:i] stop.Go(func() error { return s.Stop(c) }) + + // TODO(#4360): Remove this when issue is resolved. + // Wait until memberlist for all nodes has recognised the instance left. + // This means that we will not gossip tombstones to leaving nodes. + for _, c := range instances { + require.NoError(t, c.WaitSumMetrics(e2e.Equals(float64(len(instances))), "memberlist_client_cluster_members_count")) + } } require.NoError(t, stop.Wait()) From f1f36429ba06f5efbed42ce0ee779fd59cf987ef Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Tue, 13 Jul 2021 16:38:09 +0200 Subject: [PATCH 019/137] Update go version in build image to 1.16.6 (#4362) * Update go version in build image to 1.16.6 This addresses CVE-2021-34558. https://github.com/golang/go/issues/47143 Signed-off-by: Christian Simon * Update build image and golang version in CI Signed-off-by: Marco Pracucci * Update LATEST_BUILD_IMAGE_TAG in makefile Signed-off-by: Christian Simon Co-authored-by: Marco Pracucci Signed-off-by: Alvin Lin --- .github/workflows/test-build-deploy.yml | 16 ++++++++-------- CHANGELOG.md | 1 + Makefile | 2 +- build-image/Dockerfile | 2 +- .../how-to-upgrade-golang-version.md | 1 + 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index db103257ad2..2ec9971069f 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -10,7 +10,7 @@ jobs: lint: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 + image: quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -34,7 +34,7 @@ jobs: test: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 + image: quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f services: cassandra: image: cassandra:3.11 @@ -55,7 +55,7 @@ jobs: build: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 + image: quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -98,8 +98,8 @@ jobs: - name: Upgrade golang run: | cd /tmp - wget https://dl.google.com/go/go1.16.3.linux-amd64.tar.gz - tar -zxvf go1.16.3.linux-amd64.tar.gz + wget https://dl.google.com/go/go1.16.6.linux-amd64.tar.gz + tar -zxvf go1.16.6.linux-amd64.tar.gz sudo rm -fr /usr/local/go sudo mv /tmp/go /usr/local/go cd - @@ -174,14 +174,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 
'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 + image: quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -218,7 +218,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:build-image-multiarch-1d2497ff6 + image: quay.io/cortexproject/build-image:20210713_update-go-1.16.6-178ab0c4f steps: - name: Checkout Repo uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index e966e42fa76..d7a52d3ff51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## master / unreleased * [FEATURE] Ruler: Add new `-ruler.query-stats-enabled` which when enabled will report the `cortex_ruler_query_seconds_total` as a per-user metric that tracks the sum of the wall time of executing queries in the ruler in seconds. #4317 +* [CHANGE] Update Go version to 1.16.6. #4362 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [CHANGE] Memberlist: the `memberlist_kv_store_value_bytes` has been removed due to values no longer being stored in-memory as encoded bytes. #4345 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262 diff --git a/Makefile b/Makefile index 0cac289efd1..779b5ae4762 100644 --- a/Makefile +++ b/Makefile @@ -119,7 +119,7 @@ build-image/$(UPTODATE): build-image/* SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER := true BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image -LATEST_BUILD_IMAGE_TAG ?= build-image-multiarch-1d2497ff6 +LATEST_BUILD_IMAGE_TAG ?= 20210713_update-go-1.16.6-178ab0c4f # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 263996ff60a..f229886d437 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.16.3-buster +FROM golang:1.16.6-buster ARG goproxyValue ENV GOPROXY=${goproxyValue} RUN apt-get update && apt-get install -y curl python-requests python-yaml file jq unzip protobuf-compiler libprotobuf-dev && \ diff --git a/docs/contributing/how-to-upgrade-golang-version.md b/docs/contributing/how-to-upgrade-golang-version.md index f1c630d77b5..9c700d5fbbd 100644 --- a/docs/contributing/how-to-upgrade-golang-version.md +++ b/docs/contributing/how-to-upgrade-golang-version.md @@ -14,6 +14,7 @@ To upgrade the Golang version: - Update the Docker image tag in `.github/workflows/*` 2. Upgrade integration tests version - Update the Golang version installed in the `integration` job in `.github/workflows/*` +3. 
Upgrade the reference to the latest build image called `LATEST_BUILD_IMAGE_TAG` in `Makefile` If the minimum support Golang version should be upgraded as well: From ff72ed660523f4586093f101c81cc4e7904e999f Mon Sep 17 00:00:00 2001 From: Goutham Veeramachaneni Date: Wed, 14 Jul 2021 18:36:42 +0200 Subject: [PATCH 020/137] Add metric name to metadata error (#4363) Signed-off-by: Goutham Veeramachaneni Signed-off-by: Alvin Lin --- pkg/ingester/user_metrics_metadata.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/ingester/user_metrics_metadata.go b/pkg/ingester/user_metrics_metadata.go index ea64df9f8ef..11fe0d9828c 100644 --- a/pkg/ingester/user_metrics_metadata.go +++ b/pkg/ingester/user_metrics_metadata.go @@ -4,6 +4,8 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -49,7 +51,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *cortexpb.MetricMetad if err := mm.limiter.AssertMaxMetadataPerMetric(mm.userID, len(set)); err != nil { validation.DiscardedMetadata.WithLabelValues(mm.userID, perMetricMetadataLimit).Inc() - return makeLimitError(perMetricMetadataLimit, mm.limiter.FormatError(mm.userID, err)) + return makeMetricLimitError(perMetricMetadataLimit, labels.FromStrings(labels.MetricName, metric), mm.limiter.FormatError(mm.userID, err)) } // if we have seen this metadata before, it is a no-op and we don't need to change our metrics. From 11a31e10c3ae3dd3c9ac3947bbc90f6c0213e72f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 20 Jul 2021 15:40:05 +0200 Subject: [PATCH 021/137] Fix ruler query failure reporting (#4335) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * This patch tries to fix problem with user-errors reported as internal errors, and adds integration test for it. Signed-off-by: Peter Štibraný * Allow passing custom error-wrapping function to ErrorTranslateQueryable. Signed-off-by: Peter Štibraný * Wrap errors returned by Queryable to custom wrapper. This allows us to distinguish between those errors and errors returned by PromQL engine, and react appropriately. Signed-off-by: Peter Štibraný * Improve ruler test to check for more scenarios. Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný Signed-off-by: Alvin Lin --- CHANGELOG.md | 4 +- integration/e2e/db/db.go | 12 +- integration/e2ecortex/client.go | 5 + integration/ruler_test.go | 160 ++++++++++++++++++ pkg/api/handlers.go | 2 +- pkg/querier/error_translate_queryable.go | 72 +++++--- pkg/querier/error_translate_queryable_test.go | 2 +- pkg/ruler/compat.go | 57 ++++++- pkg/ruler/compat_test.go | 2 +- 9 files changed, 278 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7a52d3ff51..c93e39c0a7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## master / unreleased * [FEATURE] Ruler: Add new `-ruler.query-stats-enabled` which when enabled will report the `cortex_ruler_query_seconds_total` as a per-user metric that tracks the sum of the wall time of executing queries in the ruler in seconds. #4317 - +* [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4357 * [CHANGE] Update Go version to 1.16.6. 
#4362 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [CHANGE] Memberlist: the `memberlist_kv_store_value_bytes` has been removed due to values no longer being stored in-memory as encoded bytes. #4345 @@ -25,7 +25,7 @@ * [ENHANCEMENT] Memberlist: optimized receive path for processing ring state updates, to help reduce CPU utilization in large clusters. #4345 * [ENHANCEMENT] Memberlist: expose configuration of memberlist packet compression via `-memberlist.compression=enabled`. #4346 * [BUGFIX] HA Tracker: when cleaning up obsolete elected replicas from KV store, tracker didn't update number of cluster per user correctly. #4336 -* [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4357 +* [BUGFIX] Ruler: fixed counting of PromQL evaluation errors as user-errors when updating `cortex_ruler_queries_failed_total`. #4335 ## 1.10.0-rc.0 / 2021-06-28 diff --git a/integration/e2e/db/db.go b/integration/e2e/db/db.go index fe6de79411b..ed397bbda82 100644 --- a/integration/e2e/db/db.go +++ b/integration/e2e/db/db.go @@ -14,18 +14,22 @@ const ( ) // NewMinio returns minio server, used as a local replacement for S3. -func NewMinio(port int, bktName string) *e2e.HTTPService { +func NewMinio(port int, bktNames ...string) *e2e.HTTPService { minioKESGithubContent := "https://raw.githubusercontent.com/minio/kes/master" commands := []string{ - "curl -sSL --tlsv1.2 -O '%s/root.key' -O '%s/root.cert'", - "mkdir -p /data/%s && minio server --address :%v --quiet /data", + fmt.Sprintf("curl -sSL --tlsv1.2 -O '%s/root.key' -O '%s/root.cert'", minioKESGithubContent, minioKESGithubContent), } + for _, bkt := range bktNames { + commands = append(commands, fmt.Sprintf("mkdir -p /data/%s", bkt)) + } + commands = append(commands, fmt.Sprintf("minio server --address :%v --quiet /data", port)) + m := e2e.NewHTTPService( fmt.Sprintf("minio-%v", port), images.Minio, // Create the "cortex" bucket before starting minio - e2e.NewCommandWithoutEntrypoint("sh", "-c", fmt.Sprintf(strings.Join(commands, " && "), minioKESGithubContent, minioKESGithubContent, bktName, port)), + e2e.NewCommandWithoutEntrypoint("sh", "-c", strings.Join(commands, " && ")), e2e.NewHTTPReadinessProbe(port, "/minio/health/ready", 200, 200), port, ) diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go index 4b47849dfc9..72fed34997e 100644 --- a/integration/e2ecortex/client.go +++ b/integration/e2ecortex/client.go @@ -318,6 +318,11 @@ func (c *Client) SetRuleGroup(rulegroup rulefmt.RuleGroup, namespace string) err } defer res.Body.Close() + + if res.StatusCode != 202 { + return fmt.Errorf("unexpected status code: %d", res.StatusCode) + } + return nil } diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 885f84a645a..08dec2e8a39 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -523,6 +523,166 @@ func TestRulerAlertmanagerTLS(t *testing.T) { require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_prometheus_notifications_alertmanagers_discovered"}, e2e.WaitMissingMetrics)) } +func TestRulerMetricsForInvalidQueries(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + 
+ // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, bucketName, rulestoreBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Configure the ruler. + flags := mergeFlags( + BlocksStorageFlags(), + RulerFlags(false), + map[string]string{ + // Since we're not going to run any rule (our only rule is invalid), we don't need the + // store-gateway to be configured to a valid address. + "-querier.store-gateway-addresses": "localhost:12345", + // Enable the bucket index so we can skip the initial bucket scan. + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + // Evaluate rules often, so that we don't need to wait for metrics to show up. + "-ruler.evaluation-interval": "2s", + "-ruler.poll-interval": "2s", + // No delay + "-ruler.evaluation-delay-duration": "0", + + "-blocks-storage.tsdb.block-ranges-period": "1h", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": "2h", + + // We run single ingester only, no replication. + "-distributor.replication-factor": "1", + + // Very low limit so that ruler hits it. + "-querier.max-fetched-chunks-per-query": "5", + // We need this to make limit work. + "-ingester.stream-chunks-when-using-blocks": "true", + }, + ) + + const namespace = "test" + const user = "user" + + distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), flags, "") + ruler := e2ecortex.NewRuler("ruler", consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester, ruler)) + + // Wait until both the distributor and ruler have updated the ring. The querier will also watch + // the store-gateway ring if blocks sharding is enabled. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, ruler.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", ruler.HTTPEndpoint(), user) + require.NoError(t, err) + + // Push some series to Cortex -- enough so that we can hit some limits. + for i := 0; i < 10; i++ { + series, _ := generateSeries("metric", time.Now(), prompb.Label{Name: "foo", Value: fmt.Sprintf("%d", i)}) + + res, err := c.Push(series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + } + + totalQueries, err := ruler.SumMetrics([]string{"cortex_ruler_queries_total"}) + require.NoError(t, err) + + // Verify that user-failures don't increase cortex_ruler_queries_failed_total + for groupName, expression := range map[string]string{ + // Syntactically correct expression (passes check in ruler), but failing because of invalid regex. This fails in PromQL engine. + "invalid_group": `label_replace(metric, "foo", "$1", "service", "[")`, + + // This one fails in querier code, because of limits. + "too_many_chunks_group": `sum(metric)`, + } { + t.Run(groupName, func(t *testing.T) { + require.NoError(t, c.SetRuleGroup(ruleGroupWithRule(groupName, "rule", expression), namespace)) + m := ruleGroupMatcher(user, namespace, groupName) + + // Wait until ruler has loaded the group. + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_prometheus_rule_group_rules"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + + // Wait until rule group has tried to evaluate the rule. 
+ require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"cortex_prometheus_rule_evaluations_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + + // Verify that evaluation of the rule failed. + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"cortex_prometheus_rule_evaluation_failures_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + + // But these failures were not reported as "failed queries" + sum, err := ruler.SumMetrics([]string{"cortex_ruler_queries_failed_total"}) + require.NoError(t, err) + require.Equal(t, float64(0), sum[0]) + + // Delete rule before checkin "cortex_ruler_queries_total", as we want to reuse value for next test. + require.NoError(t, c.DeleteRuleGroup(namespace, groupName)) + + // Wait until ruler has unloaded the group. We don't use any matcher, so there should be no groups (in fact, metric disappears). + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(0), []string{"cortex_prometheus_rule_group_rules"}, e2e.SkipMissingMetrics)) + + // Check that cortex_ruler_queries_total went up since last test. + newTotalQueries, err := ruler.SumMetrics([]string{"cortex_ruler_queries_total"}) + require.NoError(t, err) + require.Greater(t, newTotalQueries[0], totalQueries[0]) + + // Remember totalQueries for next test. + totalQueries = newTotalQueries + }) + } + + // Now let's upload a non-failing rule, and make sure that it works. + t.Run("real_error", func(t *testing.T) { + const groupName = "good_rule" + const expression = `sum(metric{foo=~"1|2"})` + + require.NoError(t, c.SetRuleGroup(ruleGroupWithRule(groupName, "rule", expression), namespace)) + m := ruleGroupMatcher(user, namespace, groupName) + + // Wait until ruler has loaded the group. + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_prometheus_rule_group_rules"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + + // Wait until rule group has tried to evaluate the rule, and succeeded. + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"cortex_prometheus_rule_evaluations_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Equals(0), []string{"cortex_prometheus_rule_evaluation_failures_total"}, e2e.WithLabelMatchers(m), e2e.WaitMissingMetrics)) + + // Still no failures. + sum, err := ruler.SumMetrics([]string{"cortex_ruler_queries_failed_total"}) + require.NoError(t, err) + require.Equal(t, float64(0), sum[0]) + + // Now let's stop ingester, and recheck metrics. This should increase cortex_ruler_queries_failed_total failures. + require.NoError(t, s.Stop(ingester)) + + // We should start getting "real" failures now. + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"cortex_ruler_queries_failed_total"})) + }) +} + +func ruleGroupMatcher(user, namespace, groupName string) *labels.Matcher { + return labels.MustNewMatcher(labels.MatchEqual, "rule_group", fmt.Sprintf("/rules/%s/%s;%s", user, namespace, groupName)) +} + +func ruleGroupWithRule(groupName string, ruleName string, expression string) rulefmt.RuleGroup { + // Prepare rule group with invalid rule. 
+ var recordNode = yaml.Node{} + var exprNode = yaml.Node{} + + recordNode.SetString(ruleName) + exprNode.SetString(expression) + + return rulefmt.RuleGroup{ + Name: groupName, + Interval: 10, + Rules: []rulefmt.RuleNode{{ + Record: recordNode, + Expr: exprNode, + }}, + } +} + func createTestRuleGroup(t *testing.T) rulefmt.RuleGroup { t.Helper() diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 79a231c16f4..9af0055db9d 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -194,7 +194,7 @@ func NewQuerierHandler( api := v1.NewAPI( engine, - querier.NewErrorTranslateQueryable(queryable), // Translate errors to errors expected by API. + querier.NewErrorTranslateSampleAndChunkQueryable(queryable), // Translate errors to errors expected by API. nil, // No remote write support. exemplarQueryable, func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, diff --git a/pkg/querier/error_translate_queryable.go b/pkg/querier/error_translate_queryable.go index 97c1584d41c..ce9289b18bb 100644 --- a/pkg/querier/error_translate_queryable.go +++ b/pkg/querier/error_translate_queryable.go @@ -69,72 +69,103 @@ func TranslateToPromqlAPIError(err error) error { } } -func NewErrorTranslateQueryable(q storage.SampleAndChunkQueryable) storage.SampleAndChunkQueryable { - return errorTranslateQueryable{q} +// ErrTranslateFn is used to translate or wrap error before returning it by functions in +// storage.SampleAndChunkQueryable interface. +// Input error may be nil. +type ErrTranslateFn func(err error) error + +func NewErrorTranslateQueryable(q storage.Queryable) storage.Queryable { + return NewErrorTranslateQueryableWithFn(q, TranslateToPromqlAPIError) +} + +func NewErrorTranslateQueryableWithFn(q storage.Queryable, fn ErrTranslateFn) storage.Queryable { + return errorTranslateQueryable{q: q, fn: fn} +} + +func NewErrorTranslateSampleAndChunkQueryable(q storage.SampleAndChunkQueryable) storage.SampleAndChunkQueryable { + return NewErrorTranslateSampleAndChunkQueryableWithFn(q, TranslateToPromqlAPIError) +} + +func NewErrorTranslateSampleAndChunkQueryableWithFn(q storage.SampleAndChunkQueryable, fn ErrTranslateFn) storage.SampleAndChunkQueryable { + return errorTranslateSampleAndChunkQueryable{q: q, fn: fn} } type errorTranslateQueryable struct { - q storage.SampleAndChunkQueryable + q storage.Queryable + fn ErrTranslateFn } func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { q, err := e.q.Querier(ctx, mint, maxt) - return errorTranslateQuerier{q: q}, TranslateToPromqlAPIError(err) + return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) +} + +type errorTranslateSampleAndChunkQueryable struct { + q storage.SampleAndChunkQueryable + fn ErrTranslateFn +} + +func (e errorTranslateSampleAndChunkQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + q, err := e.q.Querier(ctx, mint, maxt) + return errorTranslateQuerier{q: q, fn: e.fn}, e.fn(err) } -func (e errorTranslateQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +func (e errorTranslateSampleAndChunkQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { q, err := e.q.ChunkQuerier(ctx, mint, maxt) - return errorTranslateChunkQuerier{q: q}, TranslateToPromqlAPIError(err) + return errorTranslateChunkQuerier{q: q, fn: e.fn}, e.fn(err) } type errorTranslateQuerier struct { - q storage.Querier + q storage.Querier + fn ErrTranslateFn } func (e 
errorTranslateQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { values, warnings, err := e.q.LabelValues(name, matchers...) - return values, warnings, TranslateToPromqlAPIError(err) + return values, warnings, e.fn(err) } func (e errorTranslateQuerier) LabelNames() ([]string, storage.Warnings, error) { values, warnings, err := e.q.LabelNames() - return values, warnings, TranslateToPromqlAPIError(err) + return values, warnings, e.fn(err) } func (e errorTranslateQuerier) Close() error { - return TranslateToPromqlAPIError(e.q.Close()) + return e.fn(e.q.Close()) } func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { s := e.q.Select(sortSeries, hints, matchers...) - return errorTranslateSeriesSet{s} + return errorTranslateSeriesSet{s: s, fn: e.fn} } type errorTranslateChunkQuerier struct { - q storage.ChunkQuerier + q storage.ChunkQuerier + fn ErrTranslateFn } func (e errorTranslateChunkQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { values, warnings, err := e.q.LabelValues(name, matchers...) - return values, warnings, TranslateToPromqlAPIError(err) + return values, warnings, e.fn(err) } func (e errorTranslateChunkQuerier) LabelNames() ([]string, storage.Warnings, error) { values, warnings, err := e.q.LabelNames() - return values, warnings, TranslateToPromqlAPIError(err) + return values, warnings, e.fn(err) } func (e errorTranslateChunkQuerier) Close() error { - return TranslateToPromqlAPIError(e.q.Close()) + return e.fn(e.q.Close()) } func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { s := e.q.Select(sortSeries, hints, matchers...) 
- return errorTranslateChunkSeriesSet{s} + return errorTranslateChunkSeriesSet{s: s, fn: e.fn} } type errorTranslateSeriesSet struct { - s storage.SeriesSet + s storage.SeriesSet + fn ErrTranslateFn } func (e errorTranslateSeriesSet) Next() bool { @@ -146,7 +177,7 @@ func (e errorTranslateSeriesSet) At() storage.Series { } func (e errorTranslateSeriesSet) Err() error { - return TranslateToPromqlAPIError(e.s.Err()) + return e.fn(e.s.Err()) } func (e errorTranslateSeriesSet) Warnings() storage.Warnings { @@ -154,7 +185,8 @@ func (e errorTranslateSeriesSet) Warnings() storage.Warnings { } type errorTranslateChunkSeriesSet struct { - s storage.ChunkSeriesSet + s storage.ChunkSeriesSet + fn ErrTranslateFn } func (e errorTranslateChunkSeriesSet) Next() bool { @@ -166,7 +198,7 @@ func (e errorTranslateChunkSeriesSet) At() storage.ChunkSeries { } func (e errorTranslateChunkSeriesSet) Err() error { - return TranslateToPromqlAPIError(e.s.Err()) + return e.fn(e.s.Err()) } func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings { diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index afeddf7c78d..35e882212aa 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -113,7 +113,7 @@ func TestApiStatusCodes(t *testing.T) { "error from seriesset": errorTestQueryable{q: errorTestQuerier{s: errorTestSeriesSet{err: tc.err}}}, } { t.Run(fmt.Sprintf("%s/%d", k, ix), func(t *testing.T) { - r := createPrometheusAPI(errorTranslateQueryable{q: q}) + r := createPrometheusAPI(NewErrorTranslateSampleAndChunkQueryable(q)) rec := httptest.NewRecorder() req := httptest.NewRequest("GET", "/api/v1/query?query=up", nil) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c95cfcd9446..7828a9f14d5 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -149,16 +149,30 @@ func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Coun return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { queries.Inc() result, err := qf(ctx, qs, t) - // We rely on TranslateToPromqlApiError to do its job here... it returns nil, if err is nil. - // It returns promql.ErrStorage, if error should be reported back as 500. - // Other errors it returns are either for canceled or timed-out queriers (we're not reporting those as failures), - // or various user-errors (limits, duplicate samples, etc. ... also not failures). - // - // All errors will still be counted towards "evaluation failures" metrics and logged by Prometheus Ruler, - // but we only want internal errors here. - if _, ok := querier.TranslateToPromqlAPIError(err).(promql.ErrStorage); ok { - failedQueries.Inc() + + // We only care about errors returned by underlying Queryable. Errors returned by PromQL engine are "user-errors", + // and not interesting here. + qerr := QueryableError{} + if err != nil && errors.As(err, &qerr) { + origErr := qerr.Unwrap() + + // Not all errors returned by Queryable are interesting, only those that would result in 500 status code. + // + // We rely on TranslateToPromqlApiError to do its job here... it returns nil, if err is nil. + // It returns promql.ErrStorage, if error should be reported back as 500. + // Other errors it returns are either for canceled or timed-out queriers (we're not reporting those as failures), + // or various user-errors (limits, duplicate samples, etc. ... also not failures). 
+ // + // All errors will still be counted towards "evaluation failures" metrics and logged by Prometheus Ruler, + // but we only want internal errors here. + if _, ok := querier.TranslateToPromqlAPIError(origErr).(promql.ErrStorage); ok { + failedQueries.Inc() + } + + // Return unwrapped error. + return result, origErr } + return result, err } } @@ -234,6 +248,11 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi }, []string{"user"}) } + // Wrap errors returned by Queryable to our wrapper, so that we can distinguish between those errors + // and errors returned by PromQL engine. Errors from Queryable can be either caused by user (limits) or internal errors. + // Errors from PromQL are always "user" errors. + q = querier.NewErrorTranslateQueryableWithFn(q, WrapQueryableErrors) + return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager { var queryTime prometheus.Counter = nil if rulerQuerySeconds != nil { @@ -255,3 +274,23 @@ func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engi }) } } + +type QueryableError struct { + err error +} + +func (q QueryableError) Unwrap() error { + return q.err +} + +func (q QueryableError) Error() string { + return q.err.Error() +} + +func WrapQueryableErrors(err error) error { + if err == nil { + return err + } + + return QueryableError{err: err} +} diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index dfcb251803a..968f5cc66da 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -229,7 +229,7 @@ func TestMetricsQueryFuncErrors(t *testing.T) { failures := prometheus.NewCounter(prometheus.CounterOpts{}) mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { - return promql.Vector{}, tc.returnedError + return promql.Vector{}, WrapQueryableErrors(tc.returnedError) } qf := MetricsQueryFunc(mockFunc, queries, failures) From 142b7a26e0b924073533a7c054fd0558d426b538 Mon Sep 17 00:00:00 2001 From: Nick Pillitteri <56quarters@users.noreply.github.com> Date: Tue, 20 Jul 2021 10:34:29 -0400 Subject: [PATCH 022/137] Add per-user query metrics for series and bytes returned (#4343) * Add per-user query metrics for series and bytes returned Add stats included in query responses from the querier and distributor for measuring the number of series and bytes included in successful queries. These stats are emitted per-user as summaries from the query frontends. These stats are picked to add visibility into the same resources limited as part of #4179 and #4216. 
Fixes #4259 Signed-off-by: Nick Pillitteri * Formatting fix Signed-off-by: Nick Pillitteri * Fix changelog to match actual changes Signed-off-by: Nick Pillitteri * Typo Signed-off-by: Nick Pillitteri * Code review changes, rename things for clarity Signed-off-by: Nick Pillitteri * Apply suggestions from code review Co-authored-by: Marco Pracucci Signed-off-by: Nick Pillitteri * Code review changes, remove superfluous summaries Signed-off-by: Nick Pillitteri Co-authored-by: Marco Pracucci Signed-off-by: Alvin Lin --- CHANGELOG.md | 1 + pkg/distributor/query.go | 5 ++ pkg/frontend/transport/handler.go | 25 +++++- pkg/frontend/transport/handler_test.go | 63 +++++++++++++ pkg/querier/blocks_store_queryable.go | 24 ++--- pkg/querier/stats/stats.go | 34 +++++++ pkg/querier/stats/stats.pb.go | 118 +++++++++++++++++++++---- pkg/querier/stats/stats.proto | 4 + pkg/querier/stats/stats_test.go | 91 +++++++++++++++++++ 9 files changed, 337 insertions(+), 28 deletions(-) create mode 100644 pkg/querier/stats/stats_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index c93e39c0a7f..5c5fdea53f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## master / unreleased * [FEATURE] Ruler: Add new `-ruler.query-stats-enabled` which when enabled will report the `cortex_ruler_query_seconds_total` as a per-user metric that tracks the sum of the wall time of executing queries in the ruler in seconds. #4317 * [FEATURE] Add shuffle sharding grouper and planner within compactor to allow further work towards parallelizing compaction #4357 +* [FEATURE] Query Frontend: Add `cortex_query_fetched_series_total` and `cortex_query_fetched_chunks_bytes_total` per-user counters to expose the number of series and bytes fetched as part of queries. These metrics can be enabled with the `-frontend.query-stats-enabled` flag (or its respective YAML config option `query_stats_enabled`). #4343 * [CHANGE] Update Go version to 1.16.6. #4362 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storare combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260 * [CHANGE] Memberlist: the `memberlist_kv_store_value_bytes` has been removed due to values no longer being stored in-memory as encoded bytes. 
#4345 diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 8619a071258..87b45164dca 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -13,6 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" @@ -282,6 +283,7 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { var ( queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) + reqStats = stats.FromContext(ctx) ) // Fetch samples from multiple ingesters @@ -383,6 +385,9 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri resp.Timeseries = append(resp.Timeseries, series) } + reqStats.AddFetchedSeries(uint64(len(resp.Chunkseries) + len(resp.Timeseries))) + reqStats.AddFetchedChunkBytes(uint64(resp.ChunksSize())) + return resp, nil } diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 435a0227481..af87e4fe305 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -60,6 +60,8 @@ type Handler struct { // Metrics. querySeconds *prometheus.CounterVec + querySeries *prometheus.CounterVec + queryBytes *prometheus.CounterVec activeUsers *util.ActiveUsersCleanupService } @@ -77,8 +79,20 @@ func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logge Help: "Total amount of wall clock time spend processing queries.", }, []string{"user"}) + h.querySeries = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_query_fetched_series_total", + Help: "Number of series fetched to execute a query.", + }, []string{"user"}) + + h.queryBytes = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_query_fetched_chunks_bytes_total", + Help: "Size of all chunks fetched to execute a query in bytes.", + }, []string{"user"}) + h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(func(user string) { h.querySeconds.DeleteLabelValues(user) + h.querySeries.DeleteLabelValues(user) + h.queryBytes.DeleteLabelValues(user) }) // If cleaner stops or fail, we will simply not clean the metrics for inactive users. _ = h.activeUsers.StartAsync(context.Background()) @@ -165,9 +179,14 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer return } userID := tenant.JoinTenantIDs(tenantIDs) + wallTime := stats.LoadWallTime() + numSeries := stats.LoadFetchedSeries() + numBytes := stats.LoadFetchedChunkBytes() // Track stats. - f.querySeconds.WithLabelValues(userID).Add(stats.LoadWallTime().Seconds()) + f.querySeconds.WithLabelValues(userID).Add(wallTime.Seconds()) + f.querySeries.WithLabelValues(userID).Add(float64(numSeries)) + f.queryBytes.WithLabelValues(userID).Add(float64(numBytes)) f.activeUsers.UpdateUserTimestamp(userID, time.Now()) // Log stats. 
@@ -177,7 +196,9 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer "method", r.Method, "path", r.URL.Path, "response_time", queryResponseTime, - "query_wall_time_seconds", stats.LoadWallTime().Seconds(), + "query_wall_time_seconds", wallTime.Seconds(), + "fetched_series_count", numSeries, + "fetched_chunks_bytes", numBytes, }, formatQueryString(queryString)...) level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index 136f1879933..8553d9fe21c 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -2,15 +2,28 @@ package transport import ( "context" + "io" "net/http" "net/http/httptest" + "strings" "testing" + "github.com/go-kit/kit/log" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/user" ) +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} + func TestWriteError(t *testing.T) { for _, test := range []struct { status int @@ -28,3 +41,53 @@ func TestWriteError(t *testing.T) { }) } } + +func TestHandler_ServeHTTP(t *testing.T) { + for _, tt := range []struct { + name string + cfg HandlerConfig + expectedMetrics int + }{ + { + name: "test handler with stats enabled", + cfg: HandlerConfig{QueryStatsEnabled: true}, + expectedMetrics: 3, + }, + { + name: "test handler with stats disabled", + cfg: HandlerConfig{QueryStatsEnabled: false}, + expectedMetrics: 0, + }, + } { + t.Run(tt.name, func(t *testing.T) { + roundTripper := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader("{}")), + }, nil + }) + + reg := prometheus.NewPedanticRegistry() + handler := NewHandler(tt.cfg, roundTripper, log.NewNopLogger(), reg) + + ctx := user.InjectOrgID(context.Background(), "12345") + req := httptest.NewRequest("GET", "/", nil) + req = req.WithContext(ctx) + resp := httptest.NewRecorder() + + handler.ServeHTTP(resp, req) + _, _ = io.ReadAll(resp.Body) + require.Equal(t, resp.Code, http.StatusOK) + + count, err := promtest.GatherAndCount( + reg, + "cortex_query_seconds_total", + "cortex_query_fetched_series_total", + "cortex_query_fetched_chunks_bytes_total", + ) + + assert.NoError(t, err) + assert.Equal(t, tt.expectedMetrics, count) + }) + } +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 98cb61e1401..02c50b5b9d1 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -29,6 +29,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/storage/bucket" @@ -565,6 +566,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( numChunks = atomic.NewInt32(0) spanLog = spanlogger.FromContext(ctx) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) + reqStats = stats.FromContext(ctx) ) // Concurrently fetch series from all clients. 
@@ -626,10 +628,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( return validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, util.LabelMatchersToString(matchers), maxChunksLimit)) } } - chunksSize := 0 - for _, c := range s.Chunks { - chunksSize += c.Size() - } + chunksSize := countChunkBytes(s) if chunkBytesLimitErr := queryLimiter.AddChunkBytes(chunksSize); chunkBytesLimitErr != nil { return validation.LimitError(chunkBytesLimitErr.Error()) } @@ -657,10 +656,16 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( } } + numSeries := len(mySeries) + chunkBytes := countChunkBytes(mySeries...) + + reqStats.AddFetchedSeries(uint64(numSeries)) + reqStats.AddFetchedChunkBytes(uint64(chunkBytes)) + level.Debug(spanLog).Log("msg", "received series from store-gateway", "instance", c.RemoteAddress(), - "num series", len(mySeries), - "bytes series", countSeriesBytes(mySeries), + "fetched series", numSeries, + "fetched chunk bytes", chunkBytes, "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) @@ -944,12 +949,11 @@ func convertBlockHintsToULIDs(hints []hintspb.Block) ([]ulid.ULID, error) { return res, nil } -func countSeriesBytes(series []*storepb.Series) (count uint64) { +// countChunkBytes returns the size of the chunks making up the provided series in bytes +func countChunkBytes(series ...*storepb.Series) (count int) { for _, s := range series { for _, c := range s.Chunks { - if c.Raw != nil { - count += uint64(len(c.Raw.Data)) - } + count += c.Size() } } diff --git a/pkg/querier/stats/stats.go b/pkg/querier/stats/stats.go index 05a7de53473..1a39b320696 100644 --- a/pkg/querier/stats/stats.go +++ b/pkg/querier/stats/stats.go @@ -54,6 +54,38 @@ func (s *Stats) LoadWallTime() time.Duration { return time.Duration(atomic.LoadInt64((*int64)(&s.WallTime))) } +func (s *Stats) AddFetchedSeries(series uint64) { + if s == nil { + return + } + + atomic.AddUint64(&s.FetchedSeriesCount, series) +} + +func (s *Stats) LoadFetchedSeries() uint64 { + if s == nil { + return 0 + } + + return atomic.LoadUint64(&s.FetchedSeriesCount) +} + +func (s *Stats) AddFetchedChunkBytes(bytes uint64) { + if s == nil { + return + } + + atomic.AddUint64(&s.FetchedChunkBytes, bytes) +} + +func (s *Stats) LoadFetchedChunkBytes() uint64 { + if s == nil { + return 0 + } + + return atomic.LoadUint64(&s.FetchedChunkBytes) +} + // Merge the provide Stats into this one. func (s *Stats) Merge(other *Stats) { if s == nil || other == nil { @@ -61,6 +93,8 @@ func (s *Stats) Merge(other *Stats) { } s.AddWallTime(other.LoadWallTime()) + s.AddFetchedSeries(other.LoadFetchedSeries()) + s.AddFetchedChunkBytes(other.LoadFetchedChunkBytes()) } func ShouldTrackHTTPGRPCResponse(r *httpgrpc.HTTPResponse) bool { diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go index b9ec9a49bad..9fd4affc1f4 100644 --- a/pkg/querier/stats/stats.pb.go +++ b/pkg/querier/stats/stats.pb.go @@ -32,6 +32,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Stats struct { // The sum of all wall time spent in the querier to execute the query. 
WallTime time.Duration `protobuf:"bytes,1,opt,name=wall_time,json=wallTime,proto3,stdduration" json:"wall_time"` + // The number of series fetched for the query + FetchedSeriesCount uint64 `protobuf:"varint,2,opt,name=fetched_series_count,json=fetchedSeriesCount,proto3" json:"fetched_series_count,omitempty"` + // The number of bytes of the chunks fetched for the query + FetchedChunkBytes uint64 `protobuf:"varint,3,opt,name=fetched_chunk_bytes,json=fetchedChunkBytes,proto3" json:"fetched_chunk_bytes,omitempty"` } func (m *Stats) Reset() { *m = Stats{} } @@ -73,6 +77,20 @@ func (m *Stats) GetWallTime() time.Duration { return 0 } +func (m *Stats) GetFetchedSeriesCount() uint64 { + if m != nil { + return m.FetchedSeriesCount + } + return 0 +} + +func (m *Stats) GetFetchedChunkBytes() uint64 { + if m != nil { + return m.FetchedChunkBytes + } + return 0 +} + func init() { proto.RegisterType((*Stats)(nil), "stats.Stats") } @@ -80,21 +98,25 @@ func init() { func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 213 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0x2e, 0x49, 0x2c, - 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x73, 0xa4, 0x74, 0xd3, 0x33, 0x4b, - 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xb2, 0x49, - 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0x74, 0x49, 0xc9, 0xa5, 0xe7, 0xe7, 0xa7, 0xe7, - 0xa4, 0x22, 0x54, 0xa5, 0x94, 0x16, 0x25, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0xe4, 0x95, 0x3c, 0xb9, - 0x58, 0x83, 0x41, 0xe6, 0x0a, 0x39, 0x70, 0x71, 0x96, 0x27, 0xe6, 0xe4, 0xc4, 0x97, 0x64, 0xe6, - 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x49, 0xea, 0x41, 0x34, 0xeb, 0xc1, 0x34, 0xeb, - 0xb9, 0x40, 0x35, 0x3b, 0x71, 0x9c, 0xb8, 0x27, 0xcf, 0x30, 0xe3, 0xbe, 0x3c, 0x63, 0x10, 0x07, - 0x48, 0x57, 0x48, 0x66, 0x6e, 0xaa, 0x93, 0xf5, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, - 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, - 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, - 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, - 0x88, 0xb7, 0x92, 0xd8, 0xc0, 0x76, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xc4, 0x26, - 0x5d, 0xf3, 0x00, 0x00, 0x00, + // 281 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0xd0, 0xb1, 0x4e, 0x83, 0x40, + 0x1c, 0xc7, 0xf1, 0xfb, 0xab, 0x35, 0x95, 0x4e, 0xa2, 0x03, 0x76, 0xf8, 0xb7, 0x71, 0xea, 0xe2, + 0xd5, 0xe8, 0xe8, 0x62, 0xa8, 0x4f, 0xd0, 0x3a, 0xb9, 0x10, 0xa0, 0x57, 0x20, 0x02, 0x67, 0xe0, + 0x2e, 0xc6, 0xcd, 0x47, 0x70, 0xf4, 0x11, 0x4c, 0x7c, 0x91, 0x8e, 0x8c, 0x9d, 0x54, 0x8e, 0xc5, + 0xb1, 0x8f, 0x60, 0xee, 0xa0, 0x71, 0xe3, 0x97, 0x0f, 0xdf, 0x4b, 0xee, 0xac, 0x41, 0x29, 0x7c, + 0x51, 0xd2, 0xa7, 0x82, 0x0b, 0x6e, 0xf7, 0xcc, 0x18, 0x5e, 0x44, 0x89, 0x88, 0x65, 0x40, 0x43, + 0x9e, 0x4d, 0x23, 0x1e, 0xf1, 0xa9, 0xd1, 0x40, 0xae, 0xcc, 0x32, 0xc3, 0x7c, 0xb5, 0xd5, 0x10, + 0x23, 0xce, 0xa3, 0x94, 0xfd, 0xff, 0xb5, 0x94, 0x85, 0x2f, 0x12, 0x9e, 0xb7, 0x7e, 0xfe, 0x09, + 0x56, 0x6f, 0xa1, 0x0f, 0xb6, 0x6f, 0xad, 0xa3, 0x67, 0x3f, 0x4d, 0x3d, 0x91, 0x64, 0xcc, 0x81, + 0x31, 0x4c, 0x06, 0x57, 0x67, 0xb4, 0xad, 0xe9, 0xae, 0xa6, 0x77, 0x5d, 0xed, 0xf6, 0xd7, 0x5f, + 0x23, 0xf2, 0xfe, 0x3d, 0x82, 0x79, 0x5f, 0x57, 0xf7, 0x49, 0xc6, 0xec, 0x4b, 
0xeb, 0x74, 0xc5, + 0x44, 0x18, 0xb3, 0xa5, 0x57, 0xb2, 0x22, 0x61, 0xa5, 0x17, 0x72, 0x99, 0x0b, 0x67, 0x6f, 0x0c, + 0x93, 0x83, 0xb9, 0xdd, 0xd9, 0xc2, 0xd0, 0x4c, 0x8b, 0x4d, 0xad, 0x93, 0x5d, 0x11, 0xc6, 0x32, + 0x7f, 0xf4, 0x82, 0x17, 0xc1, 0x4a, 0x67, 0xdf, 0x04, 0xc7, 0x1d, 0xcd, 0xb4, 0xb8, 0x1a, 0xdc, + 0x9b, 0xaa, 0x46, 0xb2, 0xa9, 0x91, 0x6c, 0x6b, 0x84, 0x57, 0x85, 0xf0, 0xa1, 0x10, 0xd6, 0x0a, + 0xa1, 0x52, 0x08, 0x3f, 0x0a, 0xe1, 0x57, 0x21, 0xd9, 0x2a, 0x84, 0xb7, 0x06, 0x49, 0xd5, 0x20, + 0xd9, 0x34, 0x48, 0x1e, 0xda, 0x97, 0x0b, 0x0e, 0xcd, 0x2d, 0xae, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x9d, 0xf1, 0x86, 0xb8, 0x56, 0x01, 0x00, 0x00, } func (this *Stats) Equal(that interface{}) bool { @@ -119,15 +141,23 @@ func (this *Stats) Equal(that interface{}) bool { if this.WallTime != that1.WallTime { return false } + if this.FetchedSeriesCount != that1.FetchedSeriesCount { + return false + } + if this.FetchedChunkBytes != that1.FetchedChunkBytes { + return false + } return true } func (this *Stats) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 7) s = append(s, "&stats.Stats{") s = append(s, "WallTime: "+fmt.Sprintf("%#v", this.WallTime)+",\n") + s = append(s, "FetchedSeriesCount: "+fmt.Sprintf("%#v", this.FetchedSeriesCount)+",\n") + s = append(s, "FetchedChunkBytes: "+fmt.Sprintf("%#v", this.FetchedChunkBytes)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -159,6 +189,16 @@ func (m *Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.FetchedChunkBytes != 0 { + i = encodeVarintStats(dAtA, i, uint64(m.FetchedChunkBytes)) + i-- + dAtA[i] = 0x18 + } + if m.FetchedSeriesCount != 0 { + i = encodeVarintStats(dAtA, i, uint64(m.FetchedSeriesCount)) + i-- + dAtA[i] = 0x10 + } n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.WallTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime):]) if err1 != nil { return 0, err1 @@ -189,6 +229,12 @@ func (m *Stats) Size() (n int) { _ = l l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime) n += 1 + l + sovStats(uint64(l)) + if m.FetchedSeriesCount != 0 { + n += 1 + sovStats(uint64(m.FetchedSeriesCount)) + } + if m.FetchedChunkBytes != 0 { + n += 1 + sovStats(uint64(m.FetchedChunkBytes)) + } return n } @@ -204,6 +250,8 @@ func (this *Stats) String() string { } s := strings.Join([]string{`&Stats{`, `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `FetchedSeriesCount:` + fmt.Sprintf("%v", this.FetchedSeriesCount) + `,`, + `FetchedChunkBytes:` + fmt.Sprintf("%v", this.FetchedChunkBytes) + `,`, `}`, }, "") return s @@ -278,6 +326,44 @@ func (m *Stats) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FetchedSeriesCount", wireType) + } + m.FetchedSeriesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FetchedSeriesCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FetchedChunkBytes", wireType) + } + m.FetchedChunkBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.FetchedChunkBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipStats(dAtA[iNdEx:]) diff --git a/pkg/querier/stats/stats.proto b/pkg/querier/stats/stats.proto index 3ec55448af7..765dd99582c 100644 --- a/pkg/querier/stats/stats.proto +++ b/pkg/querier/stats/stats.proto @@ -13,4 +13,8 @@ option (gogoproto.unmarshaler_all) = true; message Stats { // The sum of all wall time spent in the querier to execute the query. google.protobuf.Duration wall_time = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + // The number of series fetched for the query + uint64 fetched_series_count = 2; + // The number of bytes of the chunks fetched for the query + uint64 fetched_chunk_bytes = 3; } diff --git a/pkg/querier/stats/stats_test.go b/pkg/querier/stats/stats_test.go new file mode 100644 index 00000000000..f8a23c96c30 --- /dev/null +++ b/pkg/querier/stats/stats_test.go @@ -0,0 +1,91 @@ +package stats + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestStats_WallTime(t *testing.T) { + t.Run("add and load wall time", func(t *testing.T) { + stats, _ := ContextWithEmptyStats(context.Background()) + stats.AddWallTime(time.Second) + stats.AddWallTime(time.Second) + + assert.Equal(t, 2*time.Second, stats.LoadWallTime()) + }) + + t.Run("add and load wall time nil receiver", func(t *testing.T) { + var stats *Stats + stats.AddWallTime(time.Second) + + assert.Equal(t, time.Duration(0), stats.LoadWallTime()) + }) +} + +func TestStats_AddFetchedSeries(t *testing.T) { + t.Run("add and load series", func(t *testing.T) { + stats, _ := ContextWithEmptyStats(context.Background()) + stats.AddFetchedSeries(100) + stats.AddFetchedSeries(50) + + assert.Equal(t, uint64(150), stats.LoadFetchedSeries()) + }) + + t.Run("add and load series nil receiver", func(t *testing.T) { + var stats *Stats + stats.AddFetchedSeries(50) + + assert.Equal(t, uint64(0), stats.LoadFetchedSeries()) + }) +} + +func TestStats_AddFetchedChunkBytes(t *testing.T) { + t.Run("add and load bytes", func(t *testing.T) { + stats, _ := ContextWithEmptyStats(context.Background()) + stats.AddFetchedChunkBytes(4096) + stats.AddFetchedChunkBytes(4096) + + assert.Equal(t, uint64(8192), stats.LoadFetchedChunkBytes()) + }) + + t.Run("add and load bytes nil receiver", func(t *testing.T) { + var stats *Stats + stats.AddFetchedChunkBytes(1024) + + assert.Equal(t, uint64(0), stats.LoadFetchedChunkBytes()) + }) +} + +func TestStats_Merge(t *testing.T) { + t.Run("merge two stats objects", func(t *testing.T) { + stats1 := &Stats{} + stats1.AddWallTime(time.Millisecond) + stats1.AddFetchedSeries(50) + stats1.AddFetchedChunkBytes(42) + + stats2 := &Stats{} + stats2.AddWallTime(time.Second) + stats2.AddFetchedSeries(60) + stats2.AddFetchedChunkBytes(100) + + stats1.Merge(stats2) + + assert.Equal(t, 1001*time.Millisecond, stats1.LoadWallTime()) + assert.Equal(t, uint64(110), stats1.LoadFetchedSeries()) + assert.Equal(t, uint64(142), stats1.LoadFetchedChunkBytes()) + }) + + t.Run("merge two nil stats objects", func(t *testing.T) { + var stats1 *Stats + var stats2 *Stats + + stats1.Merge(stats2) + + assert.Equal(t, time.Duration(0), stats1.LoadWallTime()) + assert.Equal(t, uint64(0), stats1.LoadFetchedSeries()) + assert.Equal(t, uint64(0), stats1.LoadFetchedChunkBytes()) + }) +} From c096f4f1aed5e477564acbb54248fc9128aec95a Mon Sep 17 00:00:00 2001 From: Christian Simon Date: Wed, 21 Jul 2021 15:04:45 +0200 Subject: [PATCH 023/137] Restrict path 
segments in TenantIDs (#4375)

This prevents paths generated from TenantIDs from becoming vulnerable to path
traversal attacks.

CVE-2021-36157

Signed-off-by: Christian Simon
Signed-off-by: Alvin Lin
---
 CHANGELOG.md                |  2 ++
 Makefile                    |  2 +-
 docs/guides/limitations.md  |  2 +-
 pkg/tenant/resolver.go      | 32 +++++++++++++++++++++++++---
 pkg/tenant/resolver_test.go | 42 +++++++++++++++++++++++++++++++++++++
 5 files changed, 75 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c5fdea53f5..fb94bd921fa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,8 @@
 * [CHANGE] Update Go version to 1.16.6. #4362
 * [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storage combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260
 * [CHANGE] Memberlist: the `memberlist_kv_store_value_bytes` has been removed due to values no longer being stored in-memory as encoded bytes. #4345
+* [CHANGE] Prevent path traversal attack from users able to control the HTTP header `X-Scope-OrgID`. #4375 (CVE-2021-36157)
+  * Users only have control of the HTTP header when Cortex is not fronted by an auth proxy validating the tenant IDs
 * [ENHANCEMENT] Add timeout for waiting on compactor to become ACTIVE in the ring. #4262
 * [ENHANCEMENT] Reduce memory used by streaming queries, particularly in ruler. #4341
 * [ENHANCEMENT] Ring: allow experimental configuration of disabling of heartbeat timeouts by setting the relevant configuration value to zero. Applies to the following: #4342
diff --git a/Makefile b/Makefile
index 779b5ae4762..41930da805c 100644
--- a/Makefile
+++ b/Makefile
@@ -176,7 +176,7 @@ lint:
 	# Configured via .golangci.yml.
 	golangci-lint run

-	# Ensure no blacklisted package is imported.
+	# Ensure no blocklisted package is imported.
 	GOFLAGS="-tags=requires_docker" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\
 		golang.org/x/net/context=context,\
 		sync/atomic=go.uber.org/atomic,\
diff --git a/docs/guides/limitations.md b/docs/guides/limitations.md
index cd2caefabcd..cd72e061a60 100644
--- a/docs/guides/limitations.md
+++ b/docs/guides/limitations.md
@@ -24,7 +24,7 @@ The following character sets are generally **safe for use in the tenant ID**:
   - Exclamation point (`!`)
   - Hyphen (`-`)
   - Underscore (`_`)
-  - Period (`.`)
+  - Single Period (`.`), but the tenant IDs `.` and `..` are considered invalid
   - Asterisk (`*`)
   - Single quote (`'`)
   - Open parenthesis (`(`)
diff --git a/pkg/tenant/resolver.go b/pkg/tenant/resolver.go
index e5fbea25298..72517b082f4 100644
--- a/pkg/tenant/resolver.go
+++ b/pkg/tenant/resolver.go
@@ -2,6 +2,7 @@ package tenant

 import (
 	"context"
+	"errors"
 	"net/http"
 	"strings"

@@ -59,14 +60,36 @@ func NewSingleResolver() *SingleResolver {
 type SingleResolver struct {
 }

+// containsUnsafePathSegments will return true if the string is a directory
+// reference like `.` and `..` or if any path separator character like `/` and
+// `\` can be found.
+func containsUnsafePathSegments(id string) bool {
+	// handle the relative reference to current and parent path.
+	if id == "." || id == ".."
{ + return true + } + + return strings.ContainsAny(id, "\\/") +} + +var errInvalidTenantID = errors.New("invalid tenant ID") + func (t *SingleResolver) TenantID(ctx context.Context) (string, error) { //lint:ignore faillint wrapper around upstream method - return user.ExtractOrgID(ctx) + id, err := user.ExtractOrgID(ctx) + if err != nil { + return "", err + } + + if containsUnsafePathSegments(id) { + return "", errInvalidTenantID + } + + return id, nil } func (t *SingleResolver) TenantIDs(ctx context.Context) ([]string, error) { - //lint:ignore faillint wrapper around upstream method - orgID, err := user.ExtractOrgID(ctx) + orgID, err := t.TenantID(ctx) if err != nil { return nil, err } @@ -109,6 +132,9 @@ func (t *MultiResolver) TenantIDs(ctx context.Context) ([]string, error) { if err := ValidTenantID(orgID); err != nil { return nil, err } + if containsUnsafePathSegments(orgID) { + return nil, errInvalidTenantID + } } return NormalizeTenantIDs(orgIDs), nil diff --git a/pkg/tenant/resolver_test.go b/pkg/tenant/resolver_test.go index 69559263b49..4d2da241609 100644 --- a/pkg/tenant/resolver_test.go +++ b/pkg/tenant/resolver_test.go @@ -64,6 +64,18 @@ var commonResolverTestCases = []resolverTestCase{ tenantID: "tenant-a", tenantIDs: []string{"tenant-a"}, }, + { + name: "parent-dir", + headerValue: strptr(".."), + errTenantID: errInvalidTenantID, + errTenantIDs: errInvalidTenantID, + }, + { + name: "current-dir", + headerValue: strptr("."), + errTenantID: errInvalidTenantID, + errTenantIDs: errInvalidTenantID, + }, } func TestSingleResolver(t *testing.T) { @@ -75,6 +87,18 @@ func TestSingleResolver(t *testing.T) { tenantID: "tenant-a|tenant-b", tenantIDs: []string{"tenant-a|tenant-b"}, }, + { + name: "containing-forward-slash", + headerValue: strptr("forward/slash"), + errTenantID: errInvalidTenantID, + errTenantIDs: errInvalidTenantID, + }, + { + name: "containing-backward-slash", + headerValue: strptr(`backward\slash`), + errTenantID: errInvalidTenantID, + errTenantIDs: errInvalidTenantID, + }, }...) { t.Run(tc.name, tc.test(r)) } @@ -101,6 +125,24 @@ func TestMultiResolver(t *testing.T) { errTenantID: user.ErrTooManyOrgIDs, tenantIDs: []string{"tenant-a", "tenant-b"}, }, + { + name: "multi-tenant-with-relative-path", + headerValue: strptr("tenant-a|tenant-b|.."), + errTenantID: errInvalidTenantID, + errTenantIDs: errInvalidTenantID, + }, + { + name: "containing-forward-slash", + headerValue: strptr("forward/slash"), + errTenantID: &errTenantIDUnsupportedCharacter{pos: 7, tenantID: "forward/slash"}, + errTenantIDs: &errTenantIDUnsupportedCharacter{pos: 7, tenantID: "forward/slash"}, + }, + { + name: "containing-backward-slash", + headerValue: strptr(`backward\slash`), + errTenantID: &errTenantIDUnsupportedCharacter{pos: 8, tenantID: "backward\\slash"}, + errTenantIDs: &errTenantIDUnsupportedCharacter{pos: 8, tenantID: "backward\\slash"}, + }, }...) 
{ t.Run(tc.name, tc.test(r)) } From 3683709eb636a9f701f17327fc4a3458203adb71 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Thu, 22 Jul 2021 10:11:37 +0200 Subject: [PATCH 024/137] Chore: Upgrade Prometheus (#4374) * Chore: Upgrade Prometheus Signed-off-by: Arve Knudsen * Chore: Upgrade Prometheus Signed-off-by: Arve Knudsen Signed-off-by: Alvin Lin --- cmd/query-tee/main.go | 3 +- go.mod | 26 +- go.sum | 494 ++- pkg/compactor/compactor.go | 2 +- pkg/compactor/compactor_test.go | 2 +- pkg/configs/legacy_promql/test.go | 2 +- pkg/configs/userconfig/config.go | 10 +- pkg/configs/userconfig/config_test.go | 1 + pkg/ingester/ingester_v2.go | 9 +- pkg/ingester/ingester_v2_test.go | 2 +- pkg/querier/querier_test.go | 2 +- pkg/ruler/compat.go | 2 +- pkg/ruler/manager.go | 2 +- pkg/ruler/manager_test.go | 2 +- pkg/storegateway/bucket_stores_test.go | 2 +- pkg/storegateway/gateway_test.go | 2 +- tools/blocksconvert/builder/tsdb_test.go | 2 +- vendor/cloud.google.com/go/CHANGES.md | 60 + vendor/cloud.google.com/go/CONTRIBUTING.md | 22 +- vendor/cloud.google.com/go/SECURITY.md | 7 + .../go/bigtable/.repo-metadata.json | 12 - .../cloud.google.com/go/bigtable/CHANGES.md | 5 + vendor/cloud.google.com/go/bigtable/admin.go | 25 +- .../cloud.google.com/go/bigtable/bigtable.go | 10 +- vendor/cloud.google.com/go/bigtable/go.mod | 17 +- vendor/cloud.google.com/go/bigtable/go.sum | 65 +- vendor/cloud.google.com/go/go.mod | 24 +- vendor/cloud.google.com/go/go.sum | 56 +- .../go/internal/.repo-metadata-full.json | 817 +++-- .../go/longrunning/autogen/doc.go | 6 +- .../longrunning/autogen/operations_client.go | 209 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 297 +- .../aws/aws-sdk-go/aws/request/request.go | 17 +- .../aws/aws-sdk-go/aws/session/credentials.go | 14 +- .../aws-sdk-go/aws/session/shared_config.go | 5 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/internal/s3shared/arn/arn.go | 4 + .../internal/s3shared/endpoint_errors.go | 13 + .../internal/s3shared/resource_request.go | 2 + .../aws/aws-sdk-go/service/s3/api.go | 254 +- .../aws/aws-sdk-go/service/s3/endpoint.go | 5 +- vendor/github.com/coreos/go-systemd/LICENSE | 191 ++ vendor/github.com/coreos/go-systemd/NOTICE | 5 + .../coreos/go-systemd/journal/journal.go | 225 ++ vendor/github.com/coreos/pkg/LICENSE | 202 ++ vendor/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 245 ++ .../coreos/pkg/capnslog/pkg_logger.go | 191 ++ .../coreos/pkg/capnslog/syslog_formatter.go | 65 + vendor/github.com/go-kit/log/.gitignore | 15 + vendor/github.com/go-kit/log/LICENSE | 21 + vendor/github.com/go-kit/log/README.md | 151 + vendor/github.com/go-kit/log/doc.go | 116 + vendor/github.com/go-kit/log/go.mod | 8 + vendor/github.com/go-kit/log/go.sum | 4 + vendor/github.com/go-kit/log/json_logger.go | 91 + vendor/github.com/go-kit/log/level/doc.go | 22 + vendor/github.com/go-kit/log/level/level.go | 205 ++ vendor/github.com/go-kit/log/log.go | 179 ++ vendor/github.com/go-kit/log/logfmt_logger.go | 62 + vendor/github.com/go-kit/log/nop_logger.go | 8 + vendor/github.com/go-kit/log/stdlib.go | 151 + vendor/github.com/go-kit/log/sync.go | 113 + 
vendor/github.com/go-kit/log/value.go | 110 + .../gogo/googleapis/google/rpc/code.pb.go | 40 +- .../gogo/googleapis/google/rpc/code.proto | 8 +- .../googleapis/google/rpc/error_details.pb.go | 1704 ++++++++--- .../googleapis/google/rpc/error_details.proto | 60 +- .../gogo/googleapis/google/rpc/status.pb.go | 285 +- .../gogo/googleapis/google/rpc/status.proto | 61 +- vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_slices.go | 202 +- vendor/github.com/google/uuid/hash.go | 4 +- vendor/github.com/google/uuid/sql.go | 2 +- vendor/github.com/google/uuid/uuid.go | 10 +- vendor/github.com/google/uuid/version4.go | 8 + vendor/github.com/miekg/dns/README.md | 1 + vendor/github.com/miekg/dns/dnssec.go | 8 + vendor/github.com/miekg/dns/doc.go | 10 +- vendor/github.com/miekg/dns/edns.go | 111 + vendor/github.com/miekg/dns/go.mod | 2 +- ...en_go_not111.go => listen_no_reuseport.go} | 0 .../{listen_go111.go => listen_reuseport.go} | 0 vendor/github.com/miekg/dns/msg_helpers.go | 2 + vendor/github.com/miekg/dns/scan.go | 3 + vendor/github.com/miekg/dns/svcb.go | 14 +- vendor/github.com/miekg/dns/version.go | 2 +- .../alertmanager/pkg/labels/matcher.go | 5 +- .../alertmanager/pkg/labels/parse.go | 35 +- .../alertmanager/silence/silence.go | 74 +- .../prometheus/alertmanager/types/types.go | 94 +- .../client_golang/api/prometheus/v1/api.go | 119 +- .../prometheus/collectors/collectors.go | 16 + .../collectors/dbstats_collector.go | 119 + .../collectors/dbstats_collector_go115.go | 30 + .../collectors/dbstats_collector_pre_go115.go | 26 + .../prometheus/collectors/expvar_collector.go | 57 + .../prometheus/collectors/go_collector.go | 69 + .../collectors/process_collector.go | 56 + .../prometheus/expvar_collector.go | 39 +- .../client_golang/prometheus/go_collector.go | 47 +- .../client_golang/prometheus/histogram.go | 7 +- .../prometheus/process_collector.go | 12 +- .../client_golang/prometheus/summary.go | 11 +- .../prometheus/common/config/http_config.go | 27 +- .../prometheus/prometheus/config/config.go | 37 +- .../prometheus/prometheus/discovery/README.md | 2 +- .../prometheus/discovery/discovery.go | 2 +- .../prometheus/discovery/dns/dns.go | 7 +- .../prometheus/discovery/file/file.go | 39 +- .../prometheus/discovery/manager.go | 12 +- .../prometheus/discovery/refresh/refresh.go | 4 +- .../prometheus/notifier/notifier.go | 4 +- .../prometheus/pkg/logging/dedupe.go | 2 +- .../prometheus/prometheus/pkg/logging/file.go | 2 +- .../prometheus/pkg/logging/ratelimit.go | 2 +- .../prometheus/pkg/rulefmt/rulefmt.go | 3 +- .../pkg/textparse/openmetricsparse.go | 6 + .../prometheus/prometheus/promql/engine.go | 13 +- .../prometheus/promql/parser/parse.go | 4 +- .../prometheus/promql/query_logger.go | 4 +- .../prometheus/prometheus/promql/test.go | 16 +- .../prometheus/prometheus/rules/alerting.go | 12 +- .../prometheus/prometheus/rules/manager.go | 11 +- .../prometheus/prometheus/scrape/manager.go | 6 +- .../prometheus/prometheus/scrape/scrape.go | 76 +- .../prometheus/prometheus/scrape/target.go | 7 +- .../prometheus/prometheus/storage/fanout.go | 4 +- .../prometheus/storage/interface.go | 3 +- .../prometheus/prometheus/storage/merge.go | 2 +- .../storage/remote/metadata_watcher.go | 4 +- .../storage/remote/queue_manager.go | 30 +- .../prometheus/storage/remote/read_handler.go | 4 +- .../prometheus/storage/remote/storage.go | 2 +- .../prometheus/storage/remote/write.go | 2 +- .../storage/remote/write_handler.go | 4 +- .../prometheus/prometheus/storage/series.go | 37 +- 
.../prometheus/template/template.go | 4 +- .../prometheus/prometheus/tsdb/README.md | 2 +- .../prometheus/prometheus/tsdb/block.go | 4 +- .../prometheus/prometheus/tsdb/blockwriter.go | 8 +- .../prometheus/tsdb/chunkenc/xor.go | 18 +- .../prometheus/prometheus/tsdb/compact.go | 20 +- .../prometheus/prometheus/tsdb/db.go | 165 +- .../prometheus/prometheus/tsdb/exemplar.go | 225 +- .../prometheus/prometheus/tsdb/head.go | 300 +- .../prometheus/prometheus/tsdb/isolation.go | 29 +- .../prometheus/prometheus/tsdb/repair.go | 4 +- .../prometheus/tsdb/tombstones/tombstones.go | 4 +- .../prometheus/tsdb/tsdbblockutil.go | 2 +- .../prometheus/tsdb/tsdbutil/chunks.go | 12 + .../prometheus/prometheus/tsdb/wal.go | 8 +- .../prometheus/tsdb/wal/checkpoint.go | 4 +- .../prometheus/tsdb/wal/live_reader.go | 4 +- .../prometheus/prometheus/tsdb/wal/wal.go | 4 +- .../prometheus/prometheus/tsdb/wal/watcher.go | 15 +- .../prometheus/util/teststorage/storage.go | 9 +- .../prometheus/util/testutil/logging.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 25 +- .../uber/jaeger-client-go/CHANGELOG.md | 15 + .../github.com/uber/jaeger-client-go/Makefile | 21 +- .../uber/jaeger-client-go/constants.go | 2 +- .../baggage/remote/restriction_manager.go | 5 +- .../thrift-gen/agent/GoUnusedProtection__.go | 6 + .../agent/{constants.go => agent-consts.go} | 11 +- .../thrift-gen/agent/agent.go | 607 ++-- .../thrift-gen/agent/ttypes.go | 21 - .../baggage/GoUnusedProtection__.go | 6 + .../{constants.go => baggage-consts.go} | 11 +- .../thrift-gen/baggage/baggage.go | 565 ++++ .../baggage/baggagerestrictionmanager.go | 435 --- .../thrift-gen/baggage/ttypes.go | 154 - .../thrift-gen/jaeger/GoUnusedProtection__.go | 6 + .../thrift-gen/jaeger/agent.go | 242 -- .../jaeger/{constants.go => jaeger-consts.go} | 11 +- .../thrift-gen/jaeger/jaeger.go | 2698 +++++++++++++++++ .../thrift-gen/jaeger/ttypes.go | 2106 ------------- .../sampling/GoUnusedProtection__.go | 6 + .../{constants.go => sampling-consts.go} | 11 +- .../thrift-gen/sampling/sampling.go | 1323 ++++++++ .../thrift-gen/sampling/samplingmanager.go | 410 --- .../thrift-gen/sampling/ttypes.go | 873 ------ .../zipkincore/GoUnusedProtection__.go | 6 + .../thrift-gen/zipkincore/ttypes.go | 1337 -------- .../thrift-gen/zipkincore/zipkincollector.go | 446 --- .../{constants.go => zipkincore-consts.go} | 10 +- .../thrift-gen/zipkincore/zipkincore.go | 1853 +++++++++++ .../uber/jaeger-client-go/thrift/README.md | 10 +- .../thrift/application_exception.go | 100 +- .../thrift/binary_protocol.go | 337 +- .../uber/jaeger-client-go/thrift/client.go | 109 + .../thrift/compact_protocol.go | 216 +- .../jaeger-client-go/thrift/configuration.go | 378 +++ .../thrift/{processor.go => context.go} | 10 +- .../uber/jaeger-client-go/thrift/exception.go | 86 +- .../jaeger-client-go/thrift/header_context.go | 110 + .../thrift/header_protocol.go | 351 +++ .../thrift/header_transport.go | 810 +++++ .../uber/jaeger-client-go/thrift/logger.go | 59 + .../jaeger-client-go/thrift/memory_buffer.go | 9 +- .../uber/jaeger-client-go/thrift/numeric.go | 4 +- .../thrift/processor_factory.go | 80 + .../uber/jaeger-client-go/thrift/protocol.go | 148 +- .../thrift/protocol_exception.go | 44 +- .../thrift/response_helper.go | 94 + .../jaeger-client-go/thrift/rich_transport.go | 8 +- .../jaeger-client-go/thrift/serializer.go | 87 +- .../thrift/server_transport.go | 34 + .../thrift/simple_json_protocol.go | 326 +- .../jaeger-client-go/thrift/simple_server.go | 332 ++ 
.../uber/jaeger-client-go/thrift/transport.go | 12 +- .../thrift/transport_exception.go | 65 +- .../thrift/transport_factory.go | 6 +- .../uber/jaeger-client-go/transport/http.go | 3 +- .../uber/jaeger-client-go/transport_udp.go | 5 +- .../uber/jaeger-client-go/utils/udp_client.go | 8 +- .../go.etcd.io/etcd/pkg/fileutil/dir_unix.go | 27 + .../etcd/pkg/fileutil/dir_windows.go | 51 + vendor/go.etcd.io/etcd/pkg/fileutil/doc.go | 16 + .../go.etcd.io/etcd/pkg/fileutil/fileutil.go | 129 + vendor/go.etcd.io/etcd/pkg/fileutil/lock.go | 26 + .../etcd/pkg/fileutil/lock_flock.go | 49 + .../etcd/pkg/fileutil/lock_linux.go | 97 + .../etcd/pkg/fileutil/lock_plan9.go | 45 + .../etcd/pkg/fileutil/lock_solaris.go | 62 + .../go.etcd.io/etcd/pkg/fileutil/lock_unix.go | 29 + .../etcd/pkg/fileutil/lock_windows.go | 125 + .../etcd/pkg/fileutil/preallocate.go | 54 + .../etcd/pkg/fileutil/preallocate_darwin.go | 65 + .../etcd/pkg/fileutil/preallocate_unix.go | 49 + .../pkg/fileutil/preallocate_unsupported.go | 25 + vendor/go.etcd.io/etcd/pkg/fileutil/purge.go | 98 + .../go.etcd.io/etcd/pkg/fileutil/read_dir.go | 70 + vendor/go.etcd.io/etcd/pkg/fileutil/sync.go | 29 + .../etcd/pkg/fileutil/sync_darwin.go | 40 + .../etcd/pkg/fileutil/sync_linux.go | 34 + .../go.etcd.io/etcd/pkg/transport/listener.go | 12 +- vendor/go.uber.org/atomic/.gitignore | 3 + vendor/go.uber.org/atomic/.travis.yml | 27 - vendor/go.uber.org/atomic/CHANGELOG.md | 6 + vendor/go.uber.org/atomic/Makefile | 1 + vendor/go.uber.org/atomic/README.md | 4 +- vendor/go.uber.org/atomic/bool.go | 2 +- vendor/go.uber.org/atomic/duration.go | 2 +- vendor/go.uber.org/atomic/error.go | 2 +- vendor/go.uber.org/atomic/float64.go | 2 +- vendor/go.uber.org/atomic/gen.go | 1 + vendor/go.uber.org/atomic/go.sum | 1 - vendor/go.uber.org/atomic/int32.go | 2 +- vendor/go.uber.org/atomic/int64.go | 2 +- vendor/go.uber.org/atomic/string.go | 2 +- vendor/go.uber.org/atomic/uint32.go | 2 +- vendor/go.uber.org/atomic/uint64.go | 2 +- vendor/go.uber.org/atomic/uintptr.go | 102 + vendor/go.uber.org/atomic/unsafe_pointer.go | 58 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 1 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 1 + .../x/crypto/blake2b/blake2b_amd64.s | 1 + vendor/golang.org/x/lint/README.md | 4 + vendor/golang.org/x/mod/module/module.go | 76 +- vendor/golang.org/x/net/http2/ascii.go | 4 + vendor/golang.org/x/net/http2/server.go | 12 +- vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_aix.go | 1 + vendor/golang.org/x/sys/unix/README.md | 6 +- vendor/golang.org/x/sys/unix/asm_bsd_386.s | 4 +- vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 12 - vendor/golang.org/x/sys/unix/mkerrors.sh | 8 + .../x/sys/unix/syscall_darwin.1_13.go | 4 +- .../x/sys/unix/syscall_darwin_386.go | 51 - .../x/sys/unix/syscall_darwin_arm.go | 51 - .../x/sys/unix/syscall_darwin_libSystem.go | 9 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 71 + .../x/sys/unix/syscall_linux_386.go | 4 + .../x/sys/unix/syscall_linux_amd64.go | 4 + .../x/sys/unix/syscall_linux_arm.go | 4 + .../x/sys/unix/syscall_linux_arm64.go | 4 + .../x/sys/unix/syscall_linux_mips64x.go | 4 + .../x/sys/unix/syscall_linux_mipsx.go | 4 + .../x/sys/unix/syscall_linux_ppc.go | 4 + .../x/sys/unix/syscall_linux_ppc64x.go | 4 + .../x/sys/unix/syscall_linux_riscv64.go | 4 + .../x/sys/unix/syscall_linux_s390x.go | 4 + .../x/sys/unix/syscall_linux_sparc64.go | 4 + .../x/sys/unix/zerrors_darwin_386.go | 1789 ----------- .../x/sys/unix/zerrors_darwin_arm.go | 1789 
----------- vendor/golang.org/x/sys/unix/zerrors_linux.go | 90 + .../x/sys/unix/zerrors_linux_386.go | 19 + .../x/sys/unix/zerrors_linux_amd64.go | 19 + .../x/sys/unix/zerrors_linux_arm.go | 19 + .../x/sys/unix/zerrors_linux_arm64.go | 19 + .../x/sys/unix/zerrors_linux_mips.go | 19 + .../x/sys/unix/zerrors_linux_mips64.go | 19 + .../x/sys/unix/zerrors_linux_mips64le.go | 19 + .../x/sys/unix/zerrors_linux_mipsle.go | 19 + .../x/sys/unix/zerrors_linux_ppc.go | 19 + .../x/sys/unix/zerrors_linux_ppc64.go | 19 + .../x/sys/unix/zerrors_linux_ppc64le.go | 19 + .../x/sys/unix/zerrors_linux_riscv64.go | 19 + .../x/sys/unix/zerrors_linux_s390x.go | 19 + .../x/sys/unix/zerrors_linux_sparc64.go | 19 + .../x/sys/unix/zsyscall_darwin_386.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_386.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_386.go | 2431 --------------- .../x/sys/unix/zsyscall_darwin_386.s | 291 -- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 8 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 572 ++-- .../x/sys/unix/zsyscall_darwin_amd64.s | 852 +++++- .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 13 - .../x/sys/unix/zsyscall_darwin_arm.go | 2417 --------------- .../x/sys/unix/zsyscall_darwin_arm.s | 289 -- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 8 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 18 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 572 ++-- .../x/sys/unix/zsyscall_darwin_arm64.s | 852 +++++- .../x/sys/unix/zsysnum_darwin_386.go | 438 --- .../x/sys/unix/zsysnum_darwin_arm.go | 438 --- .../x/sys/unix/ztypes_darwin_386.go | 524 ---- .../x/sys/unix/ztypes_darwin_arm.go | 524 ---- .../x/sys/unix/ztypes_dragonfly_amd64.go | 3 + .../x/sys/unix/ztypes_freebsd_386.go | 5 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 5 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 163 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +- .../x/sys/unix/ztypes_linux_amd64.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 18 +- .../x/sys/unix/ztypes_linux_arm64.go | 18 +- .../x/sys/unix/ztypes_linux_mips.go | 18 +- .../x/sys/unix/ztypes_linux_mips64.go | 18 +- .../x/sys/unix/ztypes_linux_mips64le.go | 18 +- .../x/sys/unix/ztypes_linux_mipsle.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 18 +- .../x/sys/unix/ztypes_linux_riscv64.go | 18 +- .../x/sys/unix/ztypes_linux_s390x.go | 18 +- .../x/sys/unix/ztypes_linux_sparc64.go | 18 +- .../x/sys/unix/ztypes_netbsd_386.go | 4 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_386.go | 4 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 4 +- vendor/golang.org/x/sys/windows/empty.s | 1 + .../golang.org/x/sys/windows/exec_windows.go | 81 +- .../x/sys/windows/syscall_windows.go | 1 + .../golang.org/x/sys/windows/types_windows.go | 17 +- .../x/sys/windows/zsyscall_windows.go | 13 + vendor/golang.org/x/time/rate/rate.go | 3 - .../x/tools/cmd/goimports/goimports_gc.go | 1 + .../x/tools/cmd/goimports/goimports_not_gc.go | 1 + .../x/tools/go/ast/astutil/rewrite.go | 8 +- .../x/tools/go/gcexportdata/gcexportdata.go | 32 +- 
.../x/tools/go/internal/gcimporter/iexport.go | 90 +- .../x/tools/go/internal/gcimporter/iimport.go | 100 +- .../go/internal/gcimporter/newInterface10.go | 1 + .../go/internal/gcimporter/newInterface11.go | 1 + .../x/tools/internal/event/label/label.go | 8 +- .../fastwalk/fastwalk_dirent_fileno.go | 1 + .../internal/fastwalk/fastwalk_dirent_ino.go | 1 + .../fastwalk/fastwalk_dirent_namlen_bsd.go | 1 + .../fastwalk/fastwalk_dirent_namlen_linux.go | 4 +- .../internal/fastwalk/fastwalk_portable.go | 1 + .../tools/internal/fastwalk/fastwalk_unix.go | 29 +- .../x/tools/internal/gocommand/vendor.go | 7 +- .../x/tools/internal/gocommand/version.go | 2 +- .../x/tools/internal/imports/mod.go | 6 +- .../x/tools/internal/imports/zstdlib.go | 217 ++ .../x/tools/internal/typeparams/doc.go | 11 + .../tools/internal/typeparams/notypeparams.go | 90 + .../x/tools/internal/typeparams/typeparams.go | 105 + .../v1/cloudresourcemanager-gen.go | 76 +- .../api/storage/v1/storage-gen.go | 104 +- .../transport/http/configure_http2_go116.go | 2 +- .../api/transport/internal/dca/dca.go | 2 - .../admin/v2/bigtable_table_admin.pb.go | 12 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 4 + .../grpc/balancer_conn_wrappers.go | 4 +- vendor/google.golang.org/grpc/clientconn.go | 12 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 4 + .../grpc/credentials/credentials.go | 24 +- .../grpc/credentials/google/google.go | 13 +- .../grpc/credentials/google/xds.go | 90 + vendor/google.golang.org/grpc/dialoptions.go | 17 +- vendor/google.golang.org/grpc/go.sum | 3 - .../health/grpc_health_v1/health_grpc.pb.go | 4 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/internal.go | 11 +- .../internal/resolver/dns/dns_resolver.go | 43 +- .../internal/serviceconfig/serviceconfig.go | 16 + .../grpc/internal/transport/controlbuf.go | 32 + .../grpc/internal/transport/http2_client.go | 77 +- .../grpc/internal/transport/http2_server.go | 39 +- .../grpc/internal/transport/transport.go | 7 +- .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/metadata/metadata.go | 26 +- vendor/google.golang.org/grpc/regenerate.sh | 10 - .../grpc/resolver/resolver.go | 2 +- .../grpc/resolver_conn_wrapper.go | 63 +- vendor/google.golang.org/grpc/rpc_util.go | 19 +- vendor/google.golang.org/grpc/server.go | 113 +- vendor/google.golang.org/grpc/stream.go | 20 +- vendor/google.golang.org/grpc/tap/tap.go | 16 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 24 +- vendor/modules.txt | 62 +- 421 files changed, 23888 insertions(+), 21497 deletions(-) create mode 100644 vendor/cloud.google.com/go/SECURITY.md delete mode 100644 vendor/cloud.google.com/go/bigtable/.repo-metadata.json create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/NOTICE create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 
vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/go-kit/log/.gitignore create mode 100644 vendor/github.com/go-kit/log/LICENSE create mode 100644 vendor/github.com/go-kit/log/README.md create mode 100644 vendor/github.com/go-kit/log/doc.go create mode 100644 vendor/github.com/go-kit/log/go.mod create mode 100644 vendor/github.com/go-kit/log/go.sum create mode 100644 vendor/github.com/go-kit/log/json_logger.go create mode 100644 vendor/github.com/go-kit/log/level/doc.go create mode 100644 vendor/github.com/go-kit/log/level/level.go create mode 100644 vendor/github.com/go-kit/log/log.go create mode 100644 vendor/github.com/go-kit/log/logfmt_logger.go create mode 100644 vendor/github.com/go-kit/log/nop_logger.go create mode 100644 vendor/github.com/go-kit/log/stdlib.go create mode 100644 vendor/github.com/go-kit/log/sync.go create mode 100644 vendor/github.com/go-kit/log/value.go rename vendor/github.com/miekg/dns/{listen_go_not111.go => listen_no_reuseport.go} (100%) rename vendor/github.com/miekg/dns/{listen_go111.go => listen_reuseport.go} (100%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go rename vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/{constants.go => agent-consts.go} (75%) delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go rename vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/{constants.go => baggage-consts.go} (63%) create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go rename vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/{constants.go => jaeger-consts.go} (63%) create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go rename vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/{constants.go => sampling-consts.go} (63%) create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go delete mode 
100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go rename vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/{constants.go => zipkincore-consts.go} (83%) create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/client.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/configuration.go rename vendor/github.com/uber/jaeger-client-go/thrift/{processor.go => context.go} (74%) create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/header_context.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/logger.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/purge.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go create mode 100644 vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go delete mode 100644 vendor/go.uber.org/atomic/.travis.yml create mode 100644 vendor/go.uber.org/atomic/uintptr.go create mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go delete mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/doc.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/notypeparams.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams.go create mode 100644 vendor/google.golang.org/grpc/credentials/google/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go diff --git a/cmd/query-tee/main.go b/cmd/query-tee/main.go index 87bd65722ca..49b875f4eca 100644 --- a/cmd/query-tee/main.go +++ b/cmd/query-tee/main.go @@ -6,6 +6,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" "github.com/weaveworks/common/logging" "github.com/weaveworks/common/server" @@ -35,7 +36,7 @@ func main() { // Run the instrumentation server. registry := prometheus.NewRegistry() - registry.MustRegister(prometheus.NewGoCollector()) + registry.MustRegister(collectors.NewGoCollector()) i := querytee.NewInstrumentationServer(cfg.ServerMetricsPort, registry) if err := i.Start(); err != nil { diff --git a/go.mod b/go.mod index 0b795d52f6a..aac1346542d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/cortexproject/cortex go 1.16 require ( - cloud.google.com/go/bigtable v1.2.0 + cloud.google.com/go/bigtable v1.3.0 cloud.google.com/go/storage v1.10.0 github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-storage-blob-go v0.8.0 @@ -12,7 +12,7 @@ require ( github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 github.com/alicebob/miniredis/v2 v2.14.3 github.com/armon/go-metrics v0.3.6 - github.com/aws/aws-sdk-go v1.38.35 + github.com/aws/aws-sdk-go v1.38.60 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/cespare/xxhash v1.1.0 github.com/dustin/go-humanize v1.0.0 @@ -45,28 +45,28 @@ require ( github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/alertmanager v0.22.1-0.20210603124511-8b584eb2265e - github.com/prometheus/client_golang v1.10.0 + github.com/prometheus/alertmanager v0.22.3-0.20210628111558-8491f816296b + github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.26.1-0.20210603143733-6ef301f414bf - github.com/prometheus/prometheus v1.8.2-0.20210510213326-e313ffa8abf6 + github.com/prometheus/common v0.29.0 + github.com/prometheus/prometheus v1.8.2-0.20210720084720-59d02b5ef003 github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sony/gobreaker v0.4.1 github.com/spf13/afero v1.2.2 github.com/stretchr/testify v1.7.0 
github.com/thanos-io/thanos v0.19.1-0.20210427154226-d5bd651319d2 - github.com/uber/jaeger-client-go v2.28.0+incompatible + github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/weaveworks/common v0.0.0-20210419092856-009d1eebd624 go.etcd.io/bbolt v1.3.5 - go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f + go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 go.etcd.io/etcd/client/v3 v3.5.0-alpha.0.0.20210225194612-fa82d11a958a go.etcd.io/etcd/server/v3 v3.5.0-alpha.0.0.20210225194612-fa82d11a958a - go.uber.org/atomic v1.7.0 - golang.org/x/net v0.0.0-20210525063256-abc453219eb5 + go.uber.org/atomic v1.8.0 + golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - google.golang.org/api v0.46.0 - google.golang.org/grpc v1.37.0 + golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 + google.golang.org/api v0.48.0 + google.golang.org/grpc v1.38.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b sigs.k8s.io/yaml v1.2.0 diff --git a/go.sum b/go.sum index 251c4c9bf41..b36fdcf8cc2 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= @@ -21,8 +22,9 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -30,8 +32,9 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.1.0/go.mod h1:B6ByKcIdYmhoyDzmOnQxyOhN6r05qnewYIxxG6L0/b4= -cloud.google.com/go/bigtable v1.2.0 h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/bigtable v1.3.0 h1:PAplkJLXheOLlK5PPyy4/HXtPzHn+1/LaYDWIeGxnio= +cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -52,8 +55,10 @@ dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -62,34 +67,44 @@ github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v51.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v54.0.0+incompatible h1:Bq3L9LF0DHCexlT0fccwxgrOMfjHx8LGz+d+L7gGQv4= -github.com/Azure/azure-sdk-for-go v54.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.2.0+incompatible h1:TL2/vJWJEPOrmv97nHcbvjXES0Ntlb9P95hqGA1J2dU= +github.com/Azure/azure-sdk-for-go v55.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= 
github.com/Azure/go-autorest/autorest v0.11.2/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.11/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223pfkRIQUGOK2IE= +github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -118,18 +133,37 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= 
-github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= +github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= +github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -145,6 +179,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -159,6 +195,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= @@ -170,10 +207,13 @@ github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= @@ -191,11 +231,14 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod 
h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -205,11 +248,14 @@ github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+ github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.8/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.35 h1:7AlAO0FC+8nFjxiGKEmq0QLpiA8/XFr6eIxgRTwkdTg= -github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.60 h1:MgyEsX0IMwivwth1VwEnesBpH0vxbjp5a0w1lurMOXY= +github.com/aws/aws-sdk-go v1.38.60/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -217,13 +263,23 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -241,12 +297,17 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= github.com/chromedp/chromedp v0.5.3/go.mod h1:YLdPtndaHQ4rCpSpBG+IPpy9JvX0VD+7aaLxYgYj28w= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -265,23 
+326,111 @@ github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoC github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.3 h1:mfKOepNDIJ3EiBTEyHFpEqB6YSOSkGcjPDIu7cD+YzY= +github.com/containerd/containerd v1.5.3/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod 
h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd 
v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= @@ -299,6 +448,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= @@ -309,12 +459,19 @@ github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= @@ -333,10 +490,13 @@ github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.57.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= -github.com/digitalocean/godo v1.60.0 h1:o/vimtn/HKtYSakFAAZ59Zc5ASORd41S4z1X7pAXPn8= -github.com/digitalocean/godo v1.60.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.62.0 h1:7Gw2KFsWkxl36qJa0s50tgXaE0Cgm51JdRP+MFQvNnM= +github.com/digitalocean/godo v1.62.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -344,13 +504,18 @@ github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod 
h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -371,6 +536,7 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -397,13 +563,17 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/ github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= 
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -412,9 +582,11 @@ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -552,6 +724,8 @@ github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod github.com/go-redis/redis/v8 v8.2.3/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= github.com/go-redis/redis/v8 v8.9.0 h1:FTTbB7WqlXfVNdVv0SsxA+oVi0bAwit6bMe3IUucq2o= github.com/go-redis/redis/v8 v8.9.0/go.mod h1:ik7vb7+gm8Izylxu6kf6wG26/t2VljgCfSQ1DM4O1uU= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -586,13 +760,18 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod 
h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0 h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -607,6 +786,7 @@ github.com/gogo/status v1.0.3 h1:WkVBY59mw7qUNTr/bLwO7J2vesJ0rQ2C3tMXrTd3w5M= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -668,8 +848,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -680,14 +861,16 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -700,14 +883,16 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210208152844-1612e9be7af6/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10 h1:wAh7XxYU1O92WP9JMsK0elU+haxEN0HTc2m/C89wQvk= -github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -723,20 +908,23 @@ github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyyc github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= +github.com/gophercloud/gophercloud v0.10.0/go.mod 
h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.11.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gophercloud/gophercloud v0.15.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= -github.com/gophercloud/gophercloud v0.17.0 h1:BgVw0saxyeHWH5us/SQe1ltp0GRnytjmOLXDA8pO77E= -github.com/gophercloud/gophercloud v0.17.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.18.0 h1:V6hcuMPmjXg+js9flU8T3RIHDCjV7F5CG5GD0MRhP/w= +github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -762,6 +950,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -784,6 +973,7 @@ github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPA github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -799,6 +989,7 @@ github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -843,32 +1034,45 @@ github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwI github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= -github.com/hetznercloud/hcloud-go v1.25.0 h1:QAaFKtGKWRxjwjKJWBGMxGYUxVEQmIkb35j/WXrsazY= -github.com/hetznercloud/hcloud-go v1.25.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= +github.com/hetznercloud/hcloud-go v1.26.2 h1:fI8BXAGJI4EFeCDd2a/I4EhqyK32cDdxGeWfYMGUi50= +github.com/hetznercloud/hcloud-go v1.26.2/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/flux v0.113.0/go.mod h1:3TJtvbm/Kwuo5/PEo5P6HUzwVg4bXWkb2wPQHPtQdlU= +github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb 
v1.8.1/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb v1.8.5/go.mod h1:oFH+pbEyDln/1TKwa98oJzVrkZwdjrJOwIDGYZj7Ma0= +github.com/influxdata/influxdb v1.9.2/go.mod h1:UEe3MeD9AaP5rlPIes102IhYua3FhIWZuOXNHxDjSrI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/pkg-config v0.2.6/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -876,6 +1080,7 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -908,6 +1113,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod 
h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -924,6 +1130,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= @@ -963,9 +1171,12 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.28.5 h1:JaCziTxHJ7a01MYyjHqskRc8zXQxXOddwrDeoQ2rBnw= +github.com/linode/linodego v0.28.5/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -978,6 +1189,7 @@ github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -997,13 +1209,15 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.3/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= @@ -1020,8 +1234,11 @@ github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.42 h1:gWGe42RGaIqXQZ+r3WUGEKBEtvPHY2SXo4dqixDNxuY= +github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= @@ -1031,6 +1248,7 @@ github.com/minio/minio-go/v7 v7.0.10 h1:1oUKe4EOPUEhw2qnPQaPsJ0lmVTYLFu03SiItauX github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1050,7 +1268,13 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1068,8 +1292,10 @@ github.com/mozillazg/go-cos v0.13.0 h1:RylOpEESdWMLb13bl0ADhko12uMN3JmHqqwFu4OYG github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1082,6 +1308,7 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= @@ -1099,6 +1326,7 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1111,6 +1339,7 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1119,14 +1348,32 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -1154,13 +1401,16 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1171,6 +1421,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod 
h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= @@ -1179,8 +1430,10 @@ github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6/go.mod h1:MTqVn+vIupE0dzdgo+sMcNCp37SCAi8vPrvKTTnTz9g= github.com/prometheus/alertmanager v0.21.1-0.20210422101724-8176f78a70e1/go.mod h1:gsEqwD5BHHW9RNKvCuPOrrTMiP5I+faJUyLXvnivHik= -github.com/prometheus/alertmanager v0.22.1-0.20210603124511-8b584eb2265e h1:FNLZCG1rR9QPbkwYLMiy7TnI4WRB7TeipcxkrbszN4E= -github.com/prometheus/alertmanager v0.22.1-0.20210603124511-8b584eb2265e/go.mod h1:ntrorfzWQ1I9mhJK7AO71w4xMUgM4SxmwbtyQgAWZz0= +github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE= +github.com/prometheus/alertmanager v0.22.3-0.20210628111558-8491f816296b h1:Ug/6s8FVHfsuSGFNUTAhvp3ZXkeAVk2TvHeolesN/wk= +github.com/prometheus/alertmanager v0.22.3-0.20210628111558-8491f816296b/go.mod h1:ntrorfzWQ1I9mhJK7AO71w4xMUgM4SxmwbtyQgAWZz0= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1199,9 +1452,11 @@ github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= -github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1209,6 +1464,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= 
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -1229,18 +1485,22 @@ github.com/prometheus/common v0.20.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.24.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= -github.com/prometheus/common v0.26.1-0.20210603143733-6ef301f414bf h1:w+U3wF/6JRY6+MNfxTodIBw6dZaE11Y+Arg3bRLqatI= -github.com/prometheus/common v0.26.1-0.20210603143733-6ef301f414bf/go.mod h1:LdLj/WiR+LL0ThCPrtSZbijrsxInIhizDTiPlJhPPq4= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.0/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= +github.com/prometheus/exporter-toolkit v0.6.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -1253,6 +1513,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod 
h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= +github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc= @@ -1268,8 +1529,8 @@ github.com/prometheus/prometheus v1.8.2-0.20210215121130-6f488061dfb4/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20210315220929-1cba1741828b/go.mod h1:MS/bpdil77lPbfQeKk6OqVQ9OLnpN3Rszd0hka0EOWE= github.com/prometheus/prometheus v1.8.2-0.20210324152458-c7a62b95cea0/go.mod h1:sf7j/iAbhZahjeC0s3wwMmp5dksrJ/Za1UKdR+j6Hmw= github.com/prometheus/prometheus v1.8.2-0.20210421143221-52df5ef7a3be/go.mod h1:WbIKsp4vWCoPHis5qQfd0QimLOR7qe79roXN5O8U8bs= -github.com/prometheus/prometheus v1.8.2-0.20210510213326-e313ffa8abf6 h1:VXojCB7PbAsMMD0udzYwR2s8Uyun7jmBaRN6Gq5+HVg= -github.com/prometheus/prometheus v1.8.2-0.20210510213326-e313ffa8abf6/go.mod h1:yUzDYX0hIYu5YVHmpj/JXLOclB6QcLNDgmagD3FUnSU= +github.com/prometheus/prometheus v1.8.2-0.20210720084720-59d02b5ef003 h1:MYbsDV+OIFLkqwea5oC3jIUp/HxFyXQrvXpw/hooMRQ= +github.com/prometheus/prometheus v1.8.2-0.20210720084720-59d02b5ef003/go.mod h1:o6V+A4iPEWjLG0rSEKeev3OzfBZwP+ay+4iS4dkfLI4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1287,6 +1548,7 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1299,6 +1561,7 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= 
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -1318,7 +1581,9 @@ github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJV github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -1333,6 +1598,7 @@ github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUr github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5 h1:GJTW+uNMIV1RKwox+T4aN0/sQlYRg78uHZf2H0aBcDw= github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -1346,23 +1612,30 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper 
v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1372,6 +1645,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= @@ -1390,27 +1667,40 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966 h1:j6JEOq5QWFker+d7mFQYOhjTZonQ7YkLTHm56dbn+yM= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= -github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4= +github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/common 
v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= @@ -1421,6 +1711,9 @@ github.com/weaveworks/common v0.0.0-20210419092856-009d1eebd624/go.mod h1:ykzWac github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= @@ -1428,18 +1721,26 @@ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod 
h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm v1.11.0/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= @@ -1456,8 +1757,9 @@ go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5/go.mod h1:N0RPWo9FXJYZQI4BTkDtQylrstIigYHeR18ONnyTufk= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f h1:pBCD+Z7cy5WPTq+R6MmJJvDRpn88cp7bmTypBsn91g4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0-alpha.0 h1:+e5nrluATIy3GP53znpkHMFzPTHGYyzvJGFCbuI6ZLc= go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/client/v2 v2.305.0-alpha.0 h1:jZepGpOeJATxsbMNBZczDS2jHdK/QVHM1iPe9jURJ8o= @@ -1484,6 +1786,7 @@ go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1508,8 +1811,9 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= +go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= @@ -1524,11 +1828,15 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap 
v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1556,8 +1864,10 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1587,8 +1897,9 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= 
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1598,13 +1909,15 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1622,6 +1935,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1663,11 +1977,12 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net 
v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1683,7 +1998,6 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1725,13 +2039,18 @@ golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1748,15 +2067,18 @@ 
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1778,16 +2100,22 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1802,11 +2130,15 @@ golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c= -golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1828,8 +2160,9 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time 
v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1890,9 +2223,11 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1902,6 +2237,7 @@ golang.org/x/tools v0.0.0-20200603131246-cc40288be839/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1917,8 +2253,11 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1927,9 +2266,11 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1957,8 +2298,9 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1969,6 +2311,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1976,6 
+2319,7 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1989,6 +2333,7 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1997,6 +2342,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2013,6 +2359,7 @@ google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2022,8 +2369,11 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab h1:dkb90hr43A2Q5as5ZBphcOF2II0+EqfCBqGp7qFSpN4= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2041,6 +2391,7 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -2052,8 +2403,11 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2071,6 +2425,7 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4 gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= @@ -2090,7 +2445,11 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -2129,19 +2488,33 @@ k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= +k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.1 
h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2155,15 +2528,18 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2174,12 +2550,16 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 30d6b447355..8f5e93caafc 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -73,7 +73,7 @@ var ( } DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) { - compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool(), nil) if err != nil { return nil, nil, err } diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index da1a83cead3..911c59aa26d 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -958,7 +958,7 @@ func createTSDBBlock(t *testing.T, bkt objstore.Bucket, userID string, minT, max MinBlockDuration: int64(2 * 60 * 60 * 1000), // 2h period MaxBlockDuration: int64(2 * 60 * 60 * 1000), // 2h period RetentionDuration: int64(15 * 86400 * 1000), // 15 days - }) + }, nil) require.NoError(t, err) db.DisableCompactions() diff --git a/pkg/configs/legacy_promql/test.go b/pkg/configs/legacy_promql/test.go index 2bfa874b7da..aeaccd8deaa 100644 --- a/pkg/configs/legacy_promql/test.go +++ b/pkg/configs/legacy_promql/test.go @@ -564,7 +564,7 @@ func NewStorage(t T) storage.Storage { db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{ MinBlockDuration: int64(24 * time.Hour / time.Millisecond), MaxBlockDuration: int64(24 * time.Hour / time.Millisecond), - }) + }, nil) if err != nil { t.Fatalf("Opening test storage failed: %s", err) } diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index e55542fd443..64a3006b065 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -369,6 +369,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), nil, + "", true, 
log.With(util_log.Logger, "alert", rl.Alert.Value), )) @@ -417,7 +418,14 @@ func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) { } rule = rules.NewAlertingRule( - r.Name, expr, r.Duration, r.Labels, r.Annotations, nil, true, + r.Name, + expr, + r.Duration, + r.Labels, + r.Annotations, + nil, + "", + true, log.With(util_log.Logger, "alert", r.Name), ) diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go index 384aa44522a..2a98a5ddb81 100644 --- a/pkg/configs/userconfig/config_test.go +++ b/pkg/configs/userconfig/config_test.go @@ -91,6 +91,7 @@ func TestParseLegacyAlerts(t *testing.T) { labels.Label{Name: "message", Value: "I am a message"}, }, nil, + "", true, log.With(util_log.Logger, "alert", "TestAlert"), ) diff --git a/pkg/ingester/ingester_v2.go b/pkg/ingester/ingester_v2.go index d43e09ac9c8..d0f2e43c6b3 100644 --- a/pkg/ingester/ingester_v2.go +++ b/pkg/ingester/ingester_v2.go @@ -1566,6 +1566,10 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { instanceSeriesCount: &i.TSDBState.seriesCount, } + enableExemplars := false + if i.cfg.BlocksStorageConfig.TSDB.MaxExemplars > 0 { + enableExemplars = true + } // Create a new user database db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), @@ -1578,8 +1582,9 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { WALSegmentSize: i.cfg.BlocksStorageConfig.TSDB.WALSegmentSizeBytes, SeriesLifecycleCallback: userDB, BlocksToDelete: userDB.blocksToDelete, - MaxExemplars: i.cfg.BlocksStorageConfig.TSDB.MaxExemplars, - }) + EnableExemplarStorage: enableExemplars, + MaxExemplars: int64(i.cfg.BlocksStorageConfig.TSDB.MaxExemplars), + }, nil) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) } diff --git a/pkg/ingester/ingester_v2_test.go b/pkg/ingester/ingester_v2_test.go index 6122569f11f..1cd19d55c8c 100644 --- a/pkg/ingester/ingester_v2_test.go +++ b/pkg/ingester/ingester_v2_test.go @@ -3317,7 +3317,7 @@ func TestHeadCompactionOnStartup(t *testing.T) { NoLockfile: true, MinBlockDuration: chunkRange, MaxBlockDuration: chunkRange, - }) + }, nil) require.NoError(t, err) db.DisableCompactions() diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index d37911a57ba..bc6d3819b67 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -181,7 +181,7 @@ func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time opts := tsdb.DefaultHeadOptions() opts.ChunkDirRoot = dir // We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test. - head, err := tsdb.NewHead(nil, nil, nil, opts) + head, err := tsdb.NewHead(nil, nil, nil, opts, nil) require.NoError(t, err) t.Cleanup(func() { _ = head.Close() diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 7828a9f14d5..8ee0fe51917 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -213,7 +213,7 @@ type RulesManager interface { Stop() // Updates rules manager state. - Update(interval time.Duration, files []string, externalLabels labels.Labels) error + Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string) error // Returns current rules groups. 
RuleGroups() []*rules.Group diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index 83e6818bf6e..06fec771baf 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -148,7 +148,7 @@ func (r *DefaultMultiTenantManager) syncRulesToManager(ctx context.Context, user go manager.Run() r.userManagers[user] = manager } - err = manager.Update(r.cfg.EvaluationInterval, files, nil) + err = manager.Update(r.cfg.EvaluationInterval, files, nil, r.cfg.ExternalURL.String()) if err != nil { r.lastReloadSuccessful.WithLabelValues(user).Set(0) level.Error(r.logger).Log("msg", "unable to update rule manager", "user", user, "err", err) diff --git a/pkg/ruler/manager_test.go b/pkg/ruler/manager_test.go index 785fa756c40..25cd569a1f3 100644 --- a/pkg/ruler/manager_test.go +++ b/pkg/ruler/manager_test.go @@ -124,7 +124,7 @@ func (m *mockRulesManager) Stop() { close(m.done) } -func (m *mockRulesManager) Update(_ time.Duration, _ []string, _ labels.Labels) error { +func (m *mockRulesManager) Update(_ time.Duration, _ []string, _ labels.Labels, _ string) error { return nil } diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 1a253a0767f..7cb51d75136 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -408,7 +408,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st require.NoError(t, os.RemoveAll(tmpDir)) }() - db, err := tsdb.Open(tmpDir, log.NewNopLogger(), nil, tsdb.DefaultOptions()) + db, err := tsdb.Open(tmpDir, log.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index d1165b51ff9..84fb469ea9f 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1005,7 +1005,7 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int MinBlockDuration: 2 * time.Hour.Milliseconds(), MaxBlockDuration: 2 * time.Hour.Milliseconds(), RetentionDuration: 15 * 24 * time.Hour.Milliseconds(), - }) + }, nil) require.NoError(t, err) db.DisableCompactions() diff --git a/tools/blocksconvert/builder/tsdb_test.go b/tools/blocksconvert/builder/tsdb_test.go index 8ab5fb945ae..22f16e6d3d0 100644 --- a/tools/blocksconvert/builder/tsdb_test.go +++ b/tools/blocksconvert/builder/tsdb_test.go @@ -81,7 +81,7 @@ func TestTsdbBuilder(t *testing.T) { id, err := b.finishBlock("unit test", map[string]string{"ext_label": "12345"}) require.NoError(t, err) - db, err := tsdb.Open(dir, util_log.Logger, prometheus.NewRegistry(), tsdb.DefaultOptions()) + db, err := tsdb.Open(dir, util_log.Logger, prometheus.NewRegistry(), tsdb.DefaultOptions(), nil) require.NoError(t, err) blocks := db.Blocks() diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 6fc75f1f105..9931f7a8db2 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,65 @@ # Changes +## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02) + + +### Features + +* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. 
([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) +* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda)) +* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232)) +* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) +* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834)) +* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) +* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098)) +* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) +* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) + +## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17) + + +### Features + +* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) +* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. 
([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) +* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b)) +* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763)) +* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540)) +* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) +* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. 
([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c)) +* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4)) +* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c)) +* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481)) +* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38)) +* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808)) +* **metastore:** start generateing apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310)) +* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f)) +* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) +* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) +* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) +* **speech:** Support for spoken punctuation and spoken emojis. 
([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) + + +### Bug Fixes + +* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e)) +* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25)) +* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5)) +* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34)) +* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037) +* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) + + +### Miscellaneous Chores + +* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7)) ## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 6ca285bee22..ee9846363ae 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -69,7 +69,7 @@ these projects as "general project" and "Firestore project". After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) for each project. Ensure the project-level **Owner** -[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to +[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to each service account. During the creation of the service account, you should download the JSON credential file for use later. @@ -136,6 +136,9 @@ As part of the setup that follows, the following variables will be configured: - `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R". The creation of this is described below. +- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests, +in the form +"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region. - `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. 
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to @@ -172,6 +175,7 @@ $ gcloud beta spanner instances create go-integration-test --config regional-us- $ export MY_KEYRING=some-keyring-name $ export MY_LOCATION=global +$ export MY_SINGLE_LOCATION=us-central1 # Creates a KMS keyring, in the same location as the default location for your # project's buckets. $ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION @@ -182,6 +186,18 @@ $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --pu $ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING # Authorizes Google Cloud Storage to encrypt and decrypt using key1. $ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 + +# Create KMS Key in one region for Bigtable +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable. +$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING +# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1 +$ gcloud kms keys add-iam-policy-binding key1 \ + --keyring $MY_KEYRING \ + --location $MY_SINGLE_LOCATION \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ + --member "${GCLOUD_TESTS_GOLANG_PROJECT_ID}@${GCLOUD_TESTS_GOLANG_PROJECT_ID}.iam.gserviceaccount.com" \ + --project $GCLOUD_TESTS_GOLANG_PROJECT_ID ``` It may be useful to add exports to your shell initialization for future use. @@ -295,8 +311,8 @@ Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers. -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) +This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0, +available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/) [gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ [indvcla]: https://developers.google.com/open-source/cla/individual diff --git a/vendor/cloud.google.com/go/SECURITY.md b/vendor/cloud.google.com/go/SECURITY.md new file mode 100644 index 00000000000..8b58ae9c01a --- /dev/null +++ b/vendor/cloud.google.com/go/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). + +The Google Security Team will respond within 5 working days of your report on g.co/vulnz. + +We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. 
diff --git a/vendor/cloud.google.com/go/bigtable/.repo-metadata.json b/vendor/cloud.google.com/go/bigtable/.repo-metadata.json deleted file mode 100644 index f4958ef6951..00000000000 --- a/vendor/cloud.google.com/go/bigtable/.repo-metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "bigtable", - "name_pretty": "Cloud Bigtable API", - "product_documentation": "https://cloud.google.com/bigtable", - "client_documentation": "https://godoc.org/cloud.google.com/go/bigtable", - "release_level": "ga", - "language": "go", - "repo": "googleapis/google-cloud-go", - "distribution_name": "cloud.google.com/go/bigtable", - "api_id": "bigtable.googleapis.com", - "requires_billing": true -} diff --git a/vendor/cloud.google.com/go/bigtable/CHANGES.md b/vendor/cloud.google.com/go/bigtable/CHANGES.md index 0e98b7683b2..c981d7f9c9c 100644 --- a/vendor/cloud.google.com/go/bigtable/CHANGES.md +++ b/vendor/cloud.google.com/go/bigtable/CHANGES.md @@ -1,5 +1,10 @@ # Changes +## v1.3.0 + +- Clients now use transport/grpc.DialPool rather than Dial. + - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. + ## v1.2.0 - Update cbt usage string. diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go index 4101a85790d..c8e625e4e5b 100644 --- a/vendor/cloud.google.com/go/bigtable/admin.go +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -40,7 +40,6 @@ import ( gtransport "google.golang.org/api/transport/grpc" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" "google.golang.org/genproto/protobuf/field_mask" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -50,7 +49,7 @@ const adminAddr = "bigtableadmin.googleapis.com:443" // AdminClient is a client type for performing admin operations within a specific instance. type AdminClient struct { - conn *grpc.ClientConn + connPool gtransport.ConnPool tClient btapb.BigtableTableAdminClient lroClient *lroauto.OperationsClient @@ -71,12 +70,12 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio // Need to add scopes for long running operations (for create table & snapshots) o = append(o, option.WithScopes(cloudresourcemanager.CloudPlatformScope)) o = append(o, opts...) - conn, err := gtransport.Dial(ctx, o...) + connPool, err := gtransport.DialPool(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } - lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + lroClient, err := lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. @@ -88,8 +87,8 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio } return &AdminClient{ - conn: conn, - tClient: btapb.NewBigtableTableAdminClient(conn), + connPool: connPool, + tClient: btapb.NewBigtableTableAdminClient(connPool), lroClient: lroClient, project: project, instance: instance, @@ -99,7 +98,7 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio // Close closes the AdminClient. 
func (ac *AdminClient) Close() error { - return ac.conn.Close() + return ac.connPool.Close() } func (ac *AdminClient) instancePrefix() string { @@ -599,7 +598,7 @@ const instanceAdminAddr = "bigtableadmin.googleapis.com:443" // InstanceAdminClient is a client type for performing admin operations on instances. // These operations can be substantially more dangerous than those provided by AdminClient. type InstanceAdminClient struct { - conn *grpc.ClientConn + connPool gtransport.ConnPool iClient btapb.BigtableInstanceAdminClient lroClient *lroauto.OperationsClient @@ -618,12 +617,12 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option. // Add gRPC client interceptors to supply Google client information. No external interceptors are passed. o = append(o, btopt.ClientInterceptorOptions(nil, nil)...) o = append(o, opts...) - conn, err := gtransport.Dial(ctx, o...) + connPool, err := gtransport.DialPool(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } - lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + lroClient, err := lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. @@ -635,8 +634,8 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option. } return &InstanceAdminClient{ - conn: conn, - iClient: btapb.NewBigtableInstanceAdminClient(conn), + connPool: connPool, + iClient: btapb.NewBigtableInstanceAdminClient(connPool), lroClient: lroClient, project: project, @@ -646,7 +645,7 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option. // Close closes the InstanceAdminClient. func (iac *InstanceAdminClient) Close() error { - return iac.conn.Close() + return iac.connPool.Close() } // StorageType is the type of storage used for all tables in an instance diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go index 16ca3dfe57c..a66fc1dc771 100644 --- a/vendor/cloud.google.com/go/bigtable/bigtable.go +++ b/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -43,7 +43,7 @@ const prodAddr = "bigtable.googleapis.com:443" // // A Client is safe to use concurrently, except for its Close method. type Client struct { - conn *grpc.ClientConn + connPool gtransport.ConnPool client btpb.BigtableClient project, instance string appProfile string @@ -80,14 +80,14 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C // can cause RPCs to fail randomly. We can delete this after the issue is fixed. option.WithGRPCDialOption(grpc.WithBlock())) o = append(o, opts...) - conn, err := gtransport.Dial(ctx, o...) + connPool, err := gtransport.DialPool(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &Client{ - conn: conn, - client: btpb.NewBigtableClient(conn), + connPool: connPool, + client: btpb.NewBigtableClient(connPool), project: project, instance: instance, appProfile: config.AppProfile, @@ -96,7 +96,7 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C // Close closes the Client. 
func (c *Client) Close() error { - return c.conn.Close() + return c.connPool.Close() } var ( diff --git a/vendor/cloud.google.com/go/bigtable/go.mod b/vendor/cloud.google.com/go/bigtable/go.mod index 623ab4ddbee..3f6a2ae62cc 100644 --- a/vendor/cloud.google.com/go/bigtable/go.mod +++ b/vendor/cloud.google.com/go/bigtable/go.mod @@ -3,17 +3,18 @@ module cloud.google.com/go/bigtable go 1.11 require ( - cloud.google.com/go v0.51.0 - cloud.google.com/go/storage v1.5.0 // indirect - github.com/golang/protobuf v1.3.2 + cloud.google.com/go v0.52.0 + cloud.google.com/go/pubsub v1.2.0 // indirect + github.com/golang/protobuf v1.3.3 github.com/google/btree v1.0.0 github.com/google/go-cmp v0.4.0 github.com/googleapis/gax-go/v2 v2.0.5 + golang.org/x/exp v0.0.0-20200207192155-f17229e696bd // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200107162124-548cf772de50 // indirect - golang.org/x/tools v0.0.0-20200108203644-89082a384178 // indirect - google.golang.org/api v0.15.0 - google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f - google.golang.org/grpc v1.26.0 + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 // indirect + golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 // indirect + google.golang.org/api v0.17.0 + google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce + google.golang.org/grpc v1.27.1 rsc.io/binaryregexp v0.2.0 ) diff --git a/vendor/cloud.google.com/go/bigtable/go.sum b/vendor/cloud.google.com/go/bigtable/go.sum index dccfbb675c8..80e1baa4146 100644 --- a/vendor/cloud.google.com/go/bigtable/go.sum +++ b/vendor/cloud.google.com/go/bigtable/go.sum @@ -9,18 +9,22 @@ cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage 
v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= @@ -45,6 +49,8 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -52,6 +58,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -93,6 +101,8 @@ go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -108,6 +118,10 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587 h1:5Uz0rkjCFu9BC9gCRN7EkwVvh golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a h1:7Wlg8L54In96HTWOaI4sreLJ6qfyGuvSau5el3fK41Y= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= +golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -122,11 +136,16 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -141,6 +160,10 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -170,14 +193,21 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -197,12 +227,20 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178 h1:f5gMxb6FbpY48csegk9UPd7IAHVrBD013CU7N4pWzoE= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= +golang.org/x/tools 
v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a h1:7YaEqUc1tUg0yDwvdX+3U5bwrBg7u3FFAZ5D8gUs4/c= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -218,6 +256,8 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -240,8 +280,13 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= @@ -249,6 +294,10 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index 4fa03cae5d2..04ee0cb4430 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -5,19 +5,19 @@ go 1.11 require ( cloud.google.com/go/storage v1.10.0 github.com/golang/mock v1.5.0 - github.com/golang/protobuf v1.5.1 - github.com/google/go-cmp v0.5.5 - github.com/google/martian/v3 v3.1.0 - github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.6 + github.com/google/martian/v3 v3.2.1 + github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 github.com/googleapis/gax-go/v2 v2.0.5 github.com/jstemmer/go-junit-report v0.9.1 go.opencensus.io v0.23.0 - golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 - golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 - golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 - golang.org/x/text v0.3.5 - golang.org/x/tools v0.1.0 - google.golang.org/api v0.43.0 - google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 - google.golang.org/grpc v1.36.1 + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c + golang.org/x/text v0.3.6 + golang.org/x/tools v0.1.2 + google.golang.org/api v0.47.0 + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c + google.golang.org/grpc v1.38.0 + google.golang.org/protobuf v1.26.0 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index d0209b286d1..6ed2bf87a31 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -17,6 +17,7 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -52,6 +53,7 @@ github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -85,8 +87,11 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -99,13 +104,15 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -116,8 +123,9 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 h1:ub2sxhs2A0HRa2dWHavvmWxiVGXNfE9wI+gcTMwED8A= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -145,6 +153,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -180,8 +189,9 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -191,8 +201,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 
h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -225,8 +236,10 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -237,8 +250,9 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -285,8 +299,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 
h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -294,8 +312,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -344,8 +363,10 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -371,8 +392,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= 
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0 h1:sQLWZQvP6jPGIP4JGPkJu4zHswrv81iobiyszr3b/0I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -419,8 +441,10 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -438,8 +462,12 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index ecb5f8efda6..fa85f8c258a 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -4,1111 +4,1358 @@ "description": "Access Approval API", "language": "Go", "client_library_type": 
"generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/accessapproval/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", "description": "Google Analytics Admin API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/admin/apiv1alpha", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/analytics/admin/apiv1alpha", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/analytics/data/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/data/apiv1alpha", "description": "Google Analytics Data API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/data/apiv1alpha", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/analytics/data/apiv1alpha", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/apigateway/apiv1": { "distribution_name": "cloud.google.com/go/apigateway/apiv1", "description": "API Gateway API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/apigateway/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/apigateway/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/appengine/apiv1": { "distribution_name": "cloud.google.com/go/appengine/apiv1", "description": "App Engine Admin API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/appengine/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/appengine/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/area120/tables/apiv1alpha1": { "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", "description": "Area120 Tables API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/area120/tables/apiv1alpha1", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/area120/tables/apiv1alpha1", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/artifactregistry/apiv1beta2": { "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", "description": "Artifact Registry API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/artifactregistry/apiv1beta2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/artifactregistry/apiv1beta2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/asset/apiv1": { "distribution_name": "cloud.google.com/go/asset/apiv1", "description": "Cloud Asset API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/asset/apiv1", + "release_level": "ga", + "library_type": "" }, 
"cloud.google.com/go/asset/apiv1p2beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", "description": "Cloud Asset API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/asset/apiv1p2beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/asset/apiv1p5beta1": { "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", "description": "Cloud Asset API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/asset/apiv1p5beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", "description": "Assured Workloads API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/assuredworkloads/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/assuredworkloads/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", "description": "Cloud AutoML API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/automl/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/automl/apiv1beta1": { "distribution_name": "cloud.google.com/go/automl/apiv1beta1", "description": "Cloud AutoML API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/automl/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/bigquery": { "distribution_name": "cloud.google.com/go/bigquery", "description": "BigQuery", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/bigquery/connection/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/connection/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", "description": "BigQuery Connection API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1beta1", - "release_level": "beta" + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/connection/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", "description": "BigQuery Data Transfer API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/datatransfer/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/datatransfer/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/bigquery/reservation/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", "description": "BigQuery Reservation API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/reservation/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/bigquery/reservation/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", "description": "BigQuery Reservation API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/reservation/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/bigquery/storage/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", "description": "BigQuery Storage API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1", - "release_level": "ga" - }, - "cloud.google.com/go/bigquery/storage/apiv1alpha2": { - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha2", - "description": "BigQuery Storage API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1alpha2", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/storage/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", "description": "BigQuery Storage API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/storage/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/bigquery/storage/apiv1beta2": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", "description": "BigQuery Storage API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/bigquery/storage/apiv1beta2", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/bigtable": { "distribution_name": "cloud.google.com/go/bigtable", 
"description": "Cloud BigTable", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigtable", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/billing/apiv1": { "distribution_name": "cloud.google.com/go/billing/apiv1", "description": "Cloud Billing API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/billing/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/billing/budgets/apiv1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", "description": "Cloud Billing Budget API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/billing/budgets/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", "description": "Cloud Billing Budget API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/billing/budgets/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", "description": "Binary Authorization API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/binaryauthorization/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/binaryauthorization/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/channel/apiv1": { "distribution_name": "cloud.google.com/go/channel/apiv1", "description": "Cloud Channel API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/channel/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/channel/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/cloudbuild/apiv1/v2": { "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", "description": "Cloud Build API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1/v2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/cloudbuild/apiv1/v2", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/clouddms/apiv1": { + "distribution_name": "cloud.google.com/go/clouddms/apiv1", + "description": "Database Migration API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/clouddms/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/cloudtasks/apiv2": { "distribution_name": 
"cloud.google.com/go/cloudtasks/apiv2", "description": "Cloud Tasks API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/cloudtasks/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/cloudtasks/apiv2beta2": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", "description": "Cloud Tasks API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/cloudtasks/apiv2beta2", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/cloudtasks/apiv2beta3": { "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", "description": "Cloud Tasks API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta3", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/cloudtasks/apiv2beta3", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/compute/metadata": { + "distribution_name": "cloud.google.com/go/compute/metadata", + "description": "Service Metadata API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/metadata", + "release_level": "ga", + "library_type": "CORE" }, "cloud.google.com/go/container/apiv1": { "distribution_name": "cloud.google.com/go/container/apiv1", "description": "Kubernetes Engine API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/container/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/container/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/containeranalysis/apiv1beta1": { "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", "description": "Container Analysis API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/containeranalysis/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/datacatalog/apiv1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1", "description": "Google Cloud Data Catalog API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/datacatalog/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/datacatalog/apiv1beta1": { "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", "description": "Google Cloud Data Catalog API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/datacatalog/apiv1beta1", + "release_level": "beta", + "library_type": "" }, 
"cloud.google.com/go/datalabeling/apiv1beta1": { "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", "description": "Data Labeling API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/datalabeling/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/datalabeling/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/dataproc/apiv1": { "distribution_name": "cloud.google.com/go/dataproc/apiv1", "description": "Cloud Dataproc API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dataproc/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/dataproc/apiv1beta2": { "distribution_name": "cloud.google.com/go/dataproc/apiv1beta2", "description": "Cloud Dataproc API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dataproc/apiv1beta2", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/dataqna/apiv1alpha": { "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", "description": "Data QnA API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataqna/apiv1alpha", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dataqna/apiv1alpha", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/datastore": { "distribution_name": "cloud.google.com/go/datastore", "description": "Cloud Datastore", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/datastore/admin/apiv1": { "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", "description": "Cloud Datastore API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore/admin/apiv1", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/datastore/admin/apiv1", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/debugger/apiv2": { "distribution_name": "cloud.google.com/go/debugger/apiv2", "description": "Stackdriver Debugger API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/debugger/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/dialogflow/apiv2": { "distribution_name": "cloud.google.com/go/dialogflow/apiv2", "description": "Dialogflow API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dialogflow/apiv2", + 
"release_level": "ga", + "library_type": "" }, "cloud.google.com/go/dialogflow/cx/apiv3": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", "description": "Dialogflow API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dialogflow/cx/apiv3", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", "description": "Dialogflow API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dialogflow/cx/apiv3beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", "description": "Cloud Data Loss Prevention (DLP) API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dlp/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/documentai/apiv1": { "distribution_name": "cloud.google.com/go/documentai/apiv1", "description": "Cloud Document AI API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/documentai/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/documentai/apiv1beta3": { "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", "description": "Cloud Document AI API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1beta3", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/documentai/apiv1beta3", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/domains/apiv1beta1": { "distribution_name": "cloud.google.com/go/domains/apiv1beta1", "description": "Cloud Domains API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/domains/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/domains/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting", + "release_level": "beta", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/errorreporting/apiv1beta1": { "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", "description": "Error Reporting API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1", - 
"release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting/apiv1beta1", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/essentialcontacts/apiv1": { + "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", + "description": "Essential Contacts API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/essentialcontacts/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/firestore": { "distribution_name": "cloud.google.com/go/firestore", "description": "Cloud Firestore API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/firestore/apiv1": { "distribution_name": "cloud.google.com/go/firestore/apiv1", "description": "Cloud Firestore API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/firestore/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/firestore/apiv1/admin": { "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", "description": "Google Cloud Firestore Admin API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1/admin", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/firestore/apiv1/admin", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/functions/apiv1": { "distribution_name": "cloud.google.com/go/functions/apiv1", "description": "Cloud Functions API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/functions/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/functions/metadata": { + "distribution_name": "cloud.google.com/go/functions/metadata", + "description": "Cloud Functions", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/functions/metadata", + "release_level": "alpha", + "library_type": "CORE" }, "cloud.google.com/go/gaming/apiv1": { "distribution_name": "cloud.google.com/go/gaming/apiv1", "description": "Game Services API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gaming/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", "description": "Game Services API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gaming/apiv1beta", + 
"release_level": "beta", + "library_type": "" }, "cloud.google.com/go/gkehub/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", "description": "GKE Hub", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/gkehub/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gkehub/apiv1beta1", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/gsuiteaddons/apiv1": { + "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", + "description": "Google Workspace Add-ons API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gsuiteaddons/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/iam": { "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam", + "release_level": "ga", + "library_type": "CORE" }, "cloud.google.com/go/iam/credentials/apiv1": { "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam/credentials/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iam/credentials/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/iot/apiv1": { "distribution_name": "cloud.google.com/go/iot/apiv1", "description": "Cloud IoT API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/iot/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/iot/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/kms/apiv1": { "distribution_name": "cloud.google.com/go/kms/apiv1", "description": "Cloud Key Management Service (KMS) API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/kms/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/kms/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/language/apiv1": { "distribution_name": "cloud.google.com/go/language/apiv1", "description": "Cloud Natural Language API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/language/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/language/apiv1beta2": { "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Cloud Natural Language API", + "description": "Google Cloud Natural Language API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/language/apiv1beta2", + "release_level": "beta", + "library_type": "" }, 
"cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", "description": "Cloud Logging API", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/logging/apiv2": { "distribution_name": "cloud.google.com/go/logging/apiv2", "description": "Cloud Logging API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/logging/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/longrunning/autogen": { "distribution_name": "cloud.google.com/go/longrunning/autogen", "description": "Long Running Operations API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/longrunning/autogen", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/managedidentities/apiv1": { "distribution_name": "cloud.google.com/go/managedidentities/apiv1", "description": "Managed Service for Microsoft Active Directory API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/managedidentities/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/managedidentities/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", "description": "Media Translation API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/mediatranslation/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/mediatranslation/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/memcache/apiv1": { "distribution_name": "cloud.google.com/go/memcache/apiv1", "description": "Cloud Memorystore for Memcached API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/memcache/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/memcache/apiv1beta2": { "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", "description": "Cloud Memorystore for Memcached API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/memcache/apiv1beta2", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/metastore/apiv1": { + "distribution_name": "cloud.google.com/go/metastore/apiv1", + "description": "Dataproc Metastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/metastore/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/metastore/apiv1alpha": { "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", "description": "Dataproc Metastore API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1alpha", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/metastore/apiv1alpha", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/metastore/apiv1beta": { "distribution_name": "cloud.google.com/go/metastore/apiv1beta", "description": "Dataproc Metastore API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/metastore/apiv1beta", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/monitoring/apiv3/v2": { "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", "description": "Cloud Monitoring API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/monitoring/apiv3/v2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/monitoring/dashboard/apiv1": { "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", "description": "Cloud Monitoring API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/monitoring/dashboard/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/networkconnectivity/apiv1alpha1": { "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", "description": "Network Connectivity API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/networkconnectivity/apiv1alpha1", - "release_level": "alpha" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/networkconnectivity/apiv1alpha1", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", "description": "Notebooks API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/notebooks/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/notebooks/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/orgpolicy/apiv2": { "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", "description": "Organization Policy API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/orgpolicy/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/orgpolicy/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { "distribution_name": 
"cloud.google.com/go/osconfig/agentendpoint/apiv1", "description": "OS Config API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/osconfig/agentendpoint/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", "description": "Cloud OS Config API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/osconfig/agentendpoint/apiv1beta", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/osconfig/apiv1": { "distribution_name": "cloud.google.com/go/osconfig/apiv1", "description": "OS Config API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/osconfig/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/osconfig/apiv1alpha": { + "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", + "description": "OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/osconfig/apiv1alpha", + "release_level": "alpha", + "library_type": "" }, "cloud.google.com/go/osconfig/apiv1beta": { "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", "description": "Cloud OS Config API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/osconfig/apiv1beta", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/oslogin/apiv1": { "distribution_name": "cloud.google.com/go/oslogin/apiv1", "description": "Cloud OS Login API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/oslogin/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/oslogin/apiv1beta": { "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", "description": "Cloud OS Login API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/oslogin/apiv1beta", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", "description": "Phishing Protection API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/phishingprotection/apiv1beta1", + "release_level": "beta", + 
"library_type": "" }, "cloud.google.com/go/policytroubleshooter/apiv1": { "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", "description": "Policy Troubleshooter API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/policytroubleshooter/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/privatecatalog/apiv1beta1": { + "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", + "description": "Cloud Private Catalog API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/privatecatalog/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/profiler": { "distribution_name": "cloud.google.com/go/profiler", "description": "Cloud Profiler", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/profiler", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/profiler", + "release_level": "ga", + "library_type": "AGENT" }, "cloud.google.com/go/pubsub": { "distribution_name": "cloud.google.com/go/pubsub", "description": "Cloud PubSub", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsub/apiv1": { "distribution_name": "cloud.google.com/go/pubsub/apiv1", "description": "Cloud Pub/Sub API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/pubsub/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/pubsublite": { + "distribution_name": "cloud.google.com/go/pubsublite", + "description": "Cloud PubSub Lite", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", + "release_level": "beta", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { "distribution_name": "cloud.google.com/go/pubsublite/apiv1", "description": "Pub/Sub Lite API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/pubsublite/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/recaptchaenterprise/apiv1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/recaptchaenterprise/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { "distribution_name": 
"cloud.google.com/go/recaptchaenterprise/apiv1beta1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/recaptchaenterprise/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", "description": "Recommendations AI", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommendationengine/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/recommendationengine/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/recommender/apiv1": { "distribution_name": "cloud.google.com/go/recommender/apiv1", "description": "Recommender API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommender/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/recommender/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/recommender/apiv1beta1": { "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", "description": "Recommender API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/recommender/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/redis/apiv1": { "distribution_name": "cloud.google.com/go/redis/apiv1", "description": "Google Cloud Memorystore for Redis API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/redis/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/redis/apiv1beta1": { "distribution_name": "cloud.google.com/go/redis/apiv1beta1", "description": "Google Cloud Memorystore for Redis API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/redis/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/resourcemanager/apiv2": { "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", "description": "Cloud Resource Manager API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcemanager/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/resourcemanager/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/resourcesettings/apiv1": { "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", "description": "Resource Settings API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcesettings/apiv1", - "release_level": "beta" + 
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/resourcesettings/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/retail/apiv2": { "distribution_name": "cloud.google.com/go/retail/apiv2", "description": "Retail API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/retail/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/retail/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/rpcreplay": { "distribution_name": "cloud.google.com/go/rpcreplay", "description": "RPC Replay", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/rpcreplay", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", + "release_level": "ga", + "library_type": "OTHER" }, "cloud.google.com/go/scheduler/apiv1": { "distribution_name": "cloud.google.com/go/scheduler/apiv1", "description": "Cloud Scheduler API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/scheduler/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/scheduler/apiv1beta1": { "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", "description": "Cloud Scheduler API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/scheduler/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/secretmanager/apiv1": { "distribution_name": "cloud.google.com/go/secretmanager/apiv1", "description": "Secret Manager API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/secretmanager/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/secretmanager/apiv1beta1": { "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", "description": "Secret Manager API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/secretmanager/apiv1beta1", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/security/privateca/apiv1": { + "distribution_name": "cloud.google.com/go/security/privateca/apiv1", + "description": "Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/security/privateca/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/security/privateca/apiv1beta1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1", "description": "Certificate Authority API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/security/privateca/apiv1beta1", - "release_level": 
"beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/security/privateca/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/securitycenter/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/securitycenter/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/securitycenter/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/securitycenter/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", "description": "Security Command Center API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/securitycenter/apiv1p1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/securitycenter/settings/apiv1beta1": { "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", "description": "Cloud Security Command Center API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/securitycenter/settings/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/servicecontrol/apiv1": { "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", "description": "Service Control API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/servicecontrol/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/servicedirectory/apiv1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", "description": "Service Directory API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/servicedirectory/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/servicedirectory/apiv1beta1": { "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", "description": "Service Directory API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/servicedirectory/apiv1beta1", + 
"release_level": "beta", + "library_type": "" }, "cloud.google.com/go/servicemanagement/apiv1": { "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", "description": "Service Management API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/servicemanagement/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/serviceusage/apiv1": { + "distribution_name": "cloud.google.com/go/serviceusage/apiv1", + "description": "Service Usage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/serviceusage/apiv1", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/shell/apiv1": { + "distribution_name": "cloud.google.com/go/shell/apiv1", + "description": "Cloud Shell API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/shell/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/spanner": { "distribution_name": "cloud.google.com/go/spanner", "description": "Cloud Spanner", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/spanner/admin/database/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", "description": "Cloud Spanner Database Admin API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/database/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/spanner/admin/database/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/spanner/admin/instance/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", "description": "Cloud Spanner Instance Admin API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/instance/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/spanner/admin/instance/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/spanner/apiv1": { "distribution_name": "cloud.google.com/go/spanner/apiv1", "description": "Cloud Spanner API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/spanner/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/speech/apiv1": { "distribution_name": "cloud.google.com/go/speech/apiv1", "description": "Cloud Speech-to-Text API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/speech/apiv1", + "release_level": "ga", + "library_type": "" }, 
"cloud.google.com/go/speech/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", "description": "Cloud Speech-to-Text API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1p1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/speech/apiv1p1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/storage": { "distribution_name": "cloud.google.com/go/storage", "description": "Cloud Storage (GCS)", "language": "Go", "client_library_type": "manual", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/storage", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", + "release_level": "ga", + "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/talent/apiv4": { "distribution_name": "cloud.google.com/go/talent/apiv4", "description": "Cloud Talent Solution API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/talent/apiv4", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/talent/apiv4beta1": { "distribution_name": "cloud.google.com/go/talent/apiv4beta1", "description": "Cloud Talent Solution API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/talent/apiv4beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/texttospeech/apiv1": { "distribution_name": "cloud.google.com/go/texttospeech/apiv1", "description": "Cloud Text-to-Speech API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/texttospeech/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/trace/apiv1": { "distribution_name": "cloud.google.com/go/trace/apiv1", "description": "Stackdriver Trace API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/trace/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/trace/apiv2": { "distribution_name": "cloud.google.com/go/trace/apiv2", "description": "Stackdriver Trace API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv2", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/trace/apiv2", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/translate/apiv3": { "distribution_name": "cloud.google.com/go/translate/apiv3", "description": "Cloud Translation API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/translate/apiv3", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/translate/apiv3", + "release_level": "ga", + "library_type": "" }, 
"cloud.google.com/go/video/transcoder/apiv1beta1": { "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", "description": "Transcoder API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/video/transcoder/apiv1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/transcoder/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/videointelligence/apiv1": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1", "description": "Cloud Video Intelligence API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/videointelligence/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/videointelligence/apiv1beta2": { "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", "description": "Google Cloud Video Intelligence API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/videointelligence/apiv1beta2", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/vision/apiv1": { "distribution_name": "cloud.google.com/go/vision/apiv1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/vision/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/vision/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1p1beta1", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/vision/apiv1p1beta1", + "release_level": "beta", + "library_type": "" + }, + "cloud.google.com/go/vpcaccess/apiv1": { + "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", + "description": "Serverless VPC Access API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/vpcaccess/apiv1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/webrisk/apiv1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1", "description": "Web Risk API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/webrisk/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/webrisk/apiv1beta1": { "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", "description": "Web Risk API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1", - "release_level": "beta" + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/webrisk/apiv1beta1", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/websecurityscanner/apiv1": { "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", "description": "Web Security Scanner API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/websecurityscanner/apiv1", - "release_level": "ga" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/websecurityscanner/apiv1", + "release_level": "ga", + "library_type": "" }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", "description": "Workflows API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/workflows/apiv1beta", + "release_level": "beta", + "library_type": "" }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", "description": "Workflow Executions API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/executions/apiv1beta", - "release_level": "beta" + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/workflows/executions/apiv1beta", + "release_level": "beta", + "library_type": "" } } diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 89928a81b06..c512294d89b 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -17,6 +17,8 @@ // Package longrunning is an auto-generated package for the // Long Running Operations API. // +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// // Use of Context // // The ctx passed to NewClient is used for authentication requests and @@ -26,7 +28,7 @@ // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more -// please visit pkg.go.dev/cloud.google.com/go. +// please visit https://pkg.go.dev/cloud.google.com/go. 
package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( @@ -46,7 +48,7 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20210402" +const versionClient = "20210602" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index 08c81fc8ab8..3fd6b76bf5c 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -46,7 +46,7 @@ type OperationsCallOptions struct { WaitOperation []gax.CallOption } -func defaultOperationsClientOptions() []option.ClientOption { +func defaultOperationsGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"), internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"), @@ -108,27 +108,138 @@ func defaultOperationsCallOptions() *OperationsCallOptions { } } +// internalOperationsClient is an interface that defines the methods availaible from Long Running Operations API. +type internalOperationsClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) + DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error + CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error + WaitOperation(context.Context, *longrunningpb.WaitOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + // OperationsClient is a client for interacting with Long Running Operations API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. type OperationsClient struct { + // The internal transport-dependent client. + internalClient internalOperationsClient + + // The call options for this service. + CallOptions *OperationsCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *OperationsClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *OperationsClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. +func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + return c.internalClient.ListOperations(ctx, req, opts...) +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteOperation(ctx, req, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.CancelOperation(ctx, req, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. 
+func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.WaitOperation(ctx, req, opts...) +} + +// operationsGRPCClient is a client for interacting with Long Running Operations API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE disableDeadlines bool + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions + // The gRPC API client. operationsClient longrunningpb.OperationsClient - // The call options for this service. - CallOptions *OperationsCallOptions - // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewOperationsClient creates a new operations client. +// NewOperationsClient creates a new operations client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // Manages long-running operations with an API service. // @@ -140,8 +251,7 @@ type OperationsClient struct { // returns long-running operations should implement the Operations interface // so developers can have a consistent client experience. func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { - clientOpts := defaultOperationsClientOptions() - + clientOpts := defaultOperationsGRPCClientOptions() if newOperationsClientHook != nil { hookOpts, err := newOperationsClientHook(ctx, clientHookParams{}) if err != nil { @@ -159,54 +269,47 @@ func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*Ope if err != nil { return nil, err } - c := &OperationsClient{ + client := OperationsClient{CallOptions: defaultOperationsCallOptions()} + + c := &operationsGRPCClient{ connPool: connPool, disableDeadlines: disableDeadlines, - CallOptions: defaultOperationsCallOptions(), - operationsClient: longrunningpb.NewOperationsClient(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + return &client, nil } // Connection returns a connection to the API service. // // Deprecated. -func (c *OperationsClient) Connection() *grpc.ClientConn { +func (c *operationsGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *OperationsClient) Close() error { - return c.connPool.Close() -} - // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { +func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// ListOperations lists operations that match the specified filter in the request. If the -// server doesn’t support this method, it returns UNIMPLEMENTED. 
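The wrapper methods added above simply forward to the transport-specific `internalOperationsClient`, so callers keep using `OperationsClient` exactly as before this refactor. A hedged usage sketch of the polling pattern the doc comments describe, assuming application default credentials and a placeholder operation name:

```go
// Sketch only: polling a long-running operation through the refactored
// OperationsClient wrapper. The operation name and polling interval are
// placeholders, not part of the vendored change.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	longrunning "cloud.google.com/go/longrunning/autogen"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

func main() {
	ctx := context.Background()

	// NewOperationsClient still returns *OperationsClient; the gRPC
	// transport now lives behind the internalOperationsClient interface.
	client, err := longrunning.NewOperationsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	name := "operations/example-operation-id" // placeholder operation name

	for {
		op, err := client.GetOperation(ctx, &longrunningpb.GetOperationRequest{Name: name})
		if err != nil {
			log.Fatal(err)
		}
		if op.GetDone() {
			fmt.Println("operation finished:", op.GetName())
			return
		}
		time.Sleep(2 * time.Second) // poll at whatever interval the service recommends
	}
}
```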
-// -// NOTE: the name binding allows API services to override the binding -// to use different resource name schemes, such as users/*/operations. To -// override the binding, API services can add a binding such as -// "/v1/{name=users/*}/operations" to their service configuration. -// For backwards compatibility, the default name includes the operations -// collection id, however overriding users must ensure the name binding -// is the parent resource, without the operations collection id. -func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) + opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) it := &OperationIterator{} req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { @@ -243,10 +346,7 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp return it } -// GetOperation gets the latest state of a long-running operation. Clients can use this -// method to poll the operation result at intervals as recommended by the API -// service. -func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { +func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) defer cancel() @@ -254,7 +354,7 @@ func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb. } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -267,11 +367,7 @@ func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb. return resp, nil } -// DeleteOperation deletes a long-running operation. This method indicates that the client is -// no longer interested in the operation result. It does not cancel the -// operation. If the server doesn’t support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. 
-func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { +func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) defer cancel() @@ -279,7 +375,7 @@ func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunning } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...) + opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) @@ -288,17 +384,7 @@ func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunning return err } -// CancelOperation starts asynchronous cancellation on a long-running operation. The server -// makes a best effort to cancel the operation, but success is not -// guaranteed. If the server doesn’t support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. Clients can use -// Operations.GetOperation or -// other methods to check whether the cancellation succeeded or whether the -// operation completed despite cancellation. On successful cancellation, -// the operation is not deleted; instead, it becomes an operation with -// an Operation.error value with a google.rpc.Status.code of 1, -// corresponding to Code.CANCELLED. -func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { +func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) defer cancel() @@ -306,7 +392,7 @@ func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunning } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) @@ -315,18 +401,9 @@ func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunning return err } -// WaitOperation waits until the specified long-running operation is done or reaches at most -// a specified timeout, returning the latest state. If the operation is -// already done, the latest state is immediately returned. If the timeout -// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC -// timeout is used. If the server does not support this method, it returns -// google.rpc.Code.UNIMPLEMENTED. 
-// Note that this method is on a best-effort basis. It may return the latest -// state before the specified timeout (including immediately), meaning even an -// immediate response is no guarantee that the operation is done. -func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { +func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.WaitOperation[0:len(c.CallOptions.WaitOperation):len(c.CallOptions.WaitOperation)], opts...) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 1e34f813dff..a07649e9063 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -302,6 +302,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -637,7 +638,19 @@ var awsPartition = partition{ "api.fleethub.iot": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "api.mediatailor": service{ @@ -824,6 +837,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "apprunner": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -1592,11 +1615,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-idp": service{ @@ -1637,11 +1661,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-sync": service{ @@ -2840,8 +2865,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3161,9 +3189,27 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "forecastquery": service{ @@ -3176,9 +3222,27 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "fsx": service{ @@ -3392,6 +3456,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "af-south-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, @@ -4068,6 +4133,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -5043,6 +5109,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -5070,9 +5137,27 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "ram": service{ @@ -5389,6 +5474,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6090,6 +6176,61 @@ var awsPartition = partition{ }, }, }, + "servicecatalog-appregistry": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "servicediscovery": service{ Endpoints: endpoints{ @@ -6158,9 +6299,27 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "shield": service{ @@ -7014,6 +7173,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "waf-regional.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -7098,6 +7263,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "fips-ap-northeast-3": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "fips-ap-south-1": endpoint{ Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -8090,6 +8261,13 @@ var awscnPartition = partition{ }, }, }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "servicediscovery": service{ Endpoints: endpoints{ @@ -8227,6 +8405,42 @@ var awscnPartition = partition{ }, }, }, + "transfer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + 
Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-cn-north-1": endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "workspaces": service{ Endpoints: endpoints{ @@ -9790,6 +10004,25 @@ var awsusgovPartition = partition{ }, }, }, + "servicecatalog-appregistry": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "servicequotas": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -10302,6 +10535,18 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-iso-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -10417,6 +10662,12 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index d597c6ead55..fb0a68fce3e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -129,12 +129,27 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, httpReq, _ := http.NewRequest(method, "", nil) var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + httpReq.URL, err = url.Parse(clientInfo.Endpoint) if err != nil { httpReq.URL = &url.URL{} err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + if len(operation.HTTPPath) != 0 { + opHTTPPath := operation.HTTPPath + var opQueryString string + if idx := strings.Index(opHTTPPath, "?"); idx >= 0 { + opQueryString = opHTTPPath[idx+1:] + opHTTPPath = opHTTPPath[:idx] + } + + if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") { + opHTTPPath = opHTTPPath[1:] + } + httpReq.URL.Path += opHTTPPath + httpReq.URL.RawQuery = opQueryString + } + r := &Request{ Config: cfg, ClientInfo: clientInfo, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 3ddd4e51282..3efdac29ff4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -101,13 +101,6 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.Creds, ) - case sharedCfg.hasSSOConfiguration(): - creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) - - case len(sharedCfg.CredentialProcess) 
!= 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - case len(sharedCfg.CredentialSource) != 0: creds, err = resolveCredsFromSource(cfg, envCfg, sharedCfg, handlers, sessOpts, @@ -123,6 +116,13 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.RoleSessionName, ) + case sharedCfg.hasSSOConfiguration(): + creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + default: // Fallback to default credentials provider, include mock errors for // the credential chain so user can identify why credentials failed to diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index c3f38b6ec07..42b16a7db9e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -401,7 +401,6 @@ func (cfg *sharedConfig) validateCredentialType() error { len(cfg.CredentialSource) != 0, len(cfg.CredentialProcess) != 0, len(cfg.WebIdentityTokenFile) != 0, - cfg.hasSSOConfiguration(), ) { return ErrSharedConfigSourceCollision } @@ -459,6 +458,10 @@ func (cfg *sharedConfig) clearCredentialOptions() { cfg.CredentialProcess = "" cfg.WebIdentityTokenFile = "" cfg.Creds = credentials.Value{} + cfg.SSOAccountID = "" + cfg.SSORegion = "" + cfg.SSORoleName = "" + cfg.SSOStartURL = "" } func (cfg *sharedConfig) clearAssumeRoleOptions() { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 0f81ba713b9..938fdfd135a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.38.35" +const SDKVersion = "1.38.60" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go index 3079e4ab0e2..216c4baabfe 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -48,6 +48,10 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} } + if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") { + return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + if len(a.Resource) == 0 { return nil, InvalidARNError{ARN: a, Reason: "resource not set"} } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go index e756b2f8733..4290ff67601 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -71,6 +71,8 @@ func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error } // NewInvalidARNWithFIPSError ARN not supported for FIPS region +// +// Deprecated: FIPS will not appear in the ARN region component. 
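// Self-contained sketch of the request-URL construction change shown above in
// aws/request/request.go: the client endpoint is parsed on its own, then the
// operation's HTTP path is appended after splitting off its query string and
// dropping a duplicate "/" at the join. joinOperationPath is a hypothetical
// helper name for illustration, not an SDK export.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func joinOperationPath(endpoint, opHTTPPath string) (*url.URL, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	if len(opHTTPPath) != 0 {
		var query string
		if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
			query = opHTTPPath[idx+1:]
			opHTTPPath = opHTTPPath[:idx]
		}
		if strings.HasSuffix(u.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
			opHTTPPath = opHTTPPath[1:]
		}
		u.Path += opHTTPPath
		u.RawQuery = query
	}
	return u, nil
}

func main() {
	u, _ := joinOperationPath("https://example.us-east-1.amazonaws.com/base/", "/items?list-type=2")
	fmt.Println(u) // https://example.us-east-1.amazonaws.com/base/items?list-type=2
}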
func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { return InvalidARNError{ message: "resource ARN not supported for FIPS region", @@ -155,6 +157,17 @@ func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, c } } +// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS +func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "use of ARN is not supported when client or request is configured for FIPS", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + // NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { return ConfigurationError{ diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go index 9f70a64ecff..2091ba6ba31 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -31,6 +31,8 @@ func (r ResourceRequest) UseFIPS() bool { } // ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS +// +// Deprecated: FIPS pseudo-regions will not be in the ARN func (r ResourceRequest) ResourceConfiguredForFIPS() bool { return IsFIPS(r.ARN().Region) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 6d15bad28f7..e23d94b1a0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -356,9 +356,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // use the s3:x-amz-metadata-directive condition key to enforce certain metadata // behavior when objects are uploaded. For more information, see Specifying // Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) -// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific -// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition +// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). // // x-amz-copy-source-if Headers // @@ -422,7 +421,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // You can use the CopyObject action to change the storage class of an object // that is already stored in Amazon S3 using the StorageClass parameter. For // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 Service Developer Guide. +// in the Amazon S3 User Guide. // // Versioning // @@ -535,7 +534,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // become the bucket owner. // // Not every string is an acceptable bucket name. 
For information about bucket -// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). // // If you want to create an Amazon S3 on Outposts bucket, see Create Bucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). @@ -723,10 +722,11 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // by using CreateMultipartUpload. // // To perform a multipart upload with encryption using an AWS KMS CMK, the requester -// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, -// and kms:DescribeKey actions on the key. These permissions are required because -// Amazon S3 must decrypt and read data from the encrypted file parts before -// it completes the multipart upload. +// must have permission to the kms:Decrypt and kms:GenerateDataKey* actions +// on the key. These permissions are required because Amazon S3 must decrypt +// and read data from the encrypted file parts before it completes the multipart +// upload. For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// in the Amazon S3 User Guide. // // If your AWS Identity and Access Management (IAM) user or role is in the same // AWS account as the AWS KMS CMK, then you must have these permissions on the @@ -1835,7 +1835,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // propagate. // // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 Developer Guide. +// in the Amazon S3 User Guide. // // The following operations are related to DeleteBucketReplication: // @@ -6497,12 +6497,13 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // ListObjectsV2 API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1,000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure -// to design your application to parse the contents of the response and handle -// it appropriately. Objects are returned sorted in an ascending order of the -// respective key names in the list. +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset +// of the objects in a bucket. A 200 OK response can contain valid or invalid +// XML. Make sure to design your application to parse the contents of the response +// and handle it appropriately. Objects are returned sorted in an ascending +// order of the respective key names in the list. For more information about +// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) // // To use this operation, you must have READ access to the bucket. // @@ -7816,7 +7817,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. 
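// A minimal sketch (bucket name and prefix are placeholders) of listing object
// keys with the paginated ListObjectsV2 call documented above: each page holds
// at most 1,000 keys, and returning true from the callback requests the next
// page.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // placeholder
		Prefix: aws.String("logs/"),          // placeholder
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}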
For information about lifecycle configuration, see -// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). // // Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. @@ -8587,7 +8588,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // Creates a replication configuration or replaces an existing one. For more // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 Developer Guide. +// in the Amazon S3 User Guide. // // To perform this operation, the user or role performing the action must have // the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) @@ -8814,11 +8815,12 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // according to resources with the same tag key values. For example, you can // tag several resources with a specific application name, and then organize // your billing information to see the total cost of that application across -// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). // -// Within a bucket, if you add a tag that has the same key as an existing tag, -// the new value overwrites the old value. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to +// an existing list of tags. // // To use this operation, you must have permissions to perform the s3:PutBucketTagging // action. The bucket owner has this permission by default and can grant this @@ -9229,7 +9231,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // Depending on performance needs, you can specify a different Storage Class. // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 Service Developer Guide. +// in the Amazon S3 User Guide. // // Versioning // @@ -9339,7 +9341,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // have an existing application that updates a bucket ACL using the request // body, you can continue to use that approach. For more information, see Access // Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// in the Amazon S3 Developer Guide. +// in the Amazon S3 User Guide. // // Access Permissions // @@ -10997,7 +10999,7 @@ type AbortMultipartUploadInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
// When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -11025,7 +11027,7 @@ type AbortMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID that identifies the multipart upload. @@ -11242,7 +11244,7 @@ type AccessControlTranslation struct { // Specifies the replica ownership. For default and valid values, see PUT bucket // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon Simple Storage Service API Reference. + // in the Amazon S3 API Reference. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -11693,7 +11695,7 @@ type BucketLoggingStatus struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to // all log object keys for a bucket. For more information, see PUT Bucket logging // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon Simple Storage Service API Reference. + // in the Amazon S3 API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -12168,7 +12170,7 @@ type CompleteMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // ID for the initiated multipart upload. @@ -12291,7 +12293,7 @@ type CompleteMultipartUploadOutput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -12577,7 +12579,7 @@ type CopyObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
// When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -12735,7 +12737,7 @@ type CopyObjectInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -12764,7 +12766,7 @@ type CopyObjectInput struct { // or using SigV4. For information about configuring using any of the officially // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -12776,7 +12778,7 @@ type CopyObjectInput struct { // Depending on performance needs, you can specify a different Storage Class. // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 Service Developer Guide. + // in the Amazon S3 User Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object destination object this value must be used in @@ -13358,7 +13360,10 @@ type CreateBucketInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // Allows grantee to create, overwrite, and delete any object in the bucket. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. @@ -13494,7 +13499,7 @@ type CreateMultipartUploadInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -13583,7 +13588,7 @@ type CreateMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -13612,7 +13617,7 @@ type CreateMultipartUploadInput struct { // KMS will fail if not made via SSL or using SigV4. For information about configuring // using any of the officially supported AWS SDKs and AWS CLI, see Specifying // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -13624,7 +13629,7 @@ type CreateMultipartUploadInput struct { // Depending on performance needs, you can specify a different Storage Class. // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 Service Developer Guide. + // in the Amazon S3 User Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -13908,7 +13913,7 @@ type CreateMultipartUploadOutput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -15613,7 +15618,7 @@ type DeleteObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -15651,7 +15656,7 @@ type DeleteObjectInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -15819,7 +15824,7 @@ type DeleteObjectTaggingInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -15970,7 +15975,7 @@ type DeleteObjectsInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -16009,7 +16014,7 @@ type DeleteObjectsInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -16333,7 +16338,7 @@ type Destination struct { // the destination bucket by specifying the AccessControlTranslation property, // this is the account ID of the destination bucket owner. For more information, // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. 
Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to @@ -16361,7 +16366,7 @@ type Destination struct { // // For valid values, see the StorageClass element of the PUT Bucket replication // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon Simple Storage Service API Reference. + // action in the Amazon S3 API Reference. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -16468,8 +16473,8 @@ type Encryption struct { // If the encryption type is aws:kms, this optional value specifies the ID of // the symmetric customer managed AWS KMS CMK to use for encryption of job results. - // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric - // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric CMKs. For more information, see Using symmetric + // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the AWS Key Management Service Developer Guide. KMSKeyId *string `type:"string" sensitive:"true"` } @@ -16520,11 +16525,11 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer - // master key (CMK) stored in AWS Key Management Service (KMS) for the destination - // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only - // supports symmetric customer managed CMKs. For more information, see Using - // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Specifies the ID (Key ARN or Alias ARN) of the customer managed AWS KMS key + // stored in AWS Key Management Service (KMS) for the destination bucket. Amazon + // S3 uses this key to encrypt replica objects. Amazon S3 only supports symmetric, + // customer managed KMS keys. For more information, see Using symmetric and + // asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the AWS Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -17035,7 +17040,7 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { // Optional configuration to replicate existing source bucket objects. For more // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 Developer Guide. +// in the Amazon S3 User Guide. type ExistingObjectReplication struct { _ struct{} `type:"structure"` @@ -18337,7 +18342,7 @@ type GetBucketLoggingOutput struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to // all log object keys for a bucket. For more information, see PUT Bucket logging // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon Simple Storage Service API Reference. + // in the Amazon S3 API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -19490,7 +19495,7 @@ type GetObjectAclInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -19510,7 +19515,7 @@ type GetObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -19664,7 +19669,7 @@ type GetObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -19720,7 +19725,7 @@ type GetObjectInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. @@ -19964,7 +19969,7 @@ type GetObjectLegalHoldInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -19984,7 +19989,7 @@ type GetObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose Legal Hold status you want to retrieve. 
@@ -20119,7 +20124,7 @@ type GetObjectLockConfigurationInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -20567,7 +20572,7 @@ type GetObjectRetentionInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -20587,7 +20592,7 @@ type GetObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. @@ -20722,7 +20727,7 @@ type GetObjectTaggingInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -20750,7 +20755,7 @@ type GetObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The versionId of the object for which to get the tagging information. @@ -20910,7 +20915,7 @@ type GetObjectTorrentInput struct { // Bucket owners need not specify this parameter in their requests. 
For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -21342,7 +21347,7 @@ type HeadBucketInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -21457,7 +21462,7 @@ type HeadObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -21514,7 +21519,7 @@ type HeadObjectInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -22417,7 +22422,7 @@ func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { // Specifies the inventory configuration for an Amazon S3 bucket. For more information, // see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon Simple Storage Service API Reference. +// in the Amazon S3 API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -23987,7 +23992,7 @@ type ListMultipartUploadsInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
// // When using this action with Amazon S3 on Outposts, you must direct requests @@ -24627,7 +24632,7 @@ type ListObjectsInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -24921,7 +24926,7 @@ type ListObjectsV2Input struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -25157,7 +25162,7 @@ type ListObjectsV2Output struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -25273,7 +25278,7 @@ type ListPartsInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -25308,7 +25313,7 @@ type ListPartsInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. 
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -25730,7 +25735,7 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { // Describes where logs are stored and the prefix that Amazon S3 assigns to // all log object keys for a bucket. For more information, see PUT Bucket logging // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon Simple Storage Service API Reference. +// in the Amazon S3 API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -25953,7 +25958,7 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { // the existing metrics configuration. If you don't include the elements you // want to keep, they are erased. For more information, see PUT Bucket metrics // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// in the Amazon Simple Storage Service API Reference. +// in the Amazon S3 API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -26155,7 +26160,7 @@ type NoncurrentVersionExpiration struct { // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. NoncurrentDays *int64 `type:"integer"` } @@ -27336,7 +27341,10 @@ type PutBucketAclInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // Allows grantee to create, overwrite, and delete any object in the bucket. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. @@ -29693,7 +29701,7 @@ type PutObjectAclInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -29720,7 +29728,10 @@ type PutObjectAclInput struct { // This action is not supported by Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // Allows grantee to create, overwrite, and delete any object in the bucket. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. 
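The reworded GrantWrite semantics above (new-object creation, plus deletes and overwrites for bucket and object owners) are exercised through the x-amz-grant-* headers. A hedged sketch follows; the canonical user IDs are placeholders, and the note that a grant-header request replaces the existing ACL (so the owner grant is restated) is an assumption, not something stated in this patch.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// grantBucketWrite grants another account write access to a bucket using the
// x-amz-grant-* headers. Both canonical user IDs are placeholders.
func grantBucketWrite(svc *s3.S3, bucket, ownerID, writerID string) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		// Assumption: grant headers replace the current ACL, so the owner's
		// full-control grant is restated alongside the new write grant.
		GrantFullControl: aws.String(`id="` + ownerID + `"`),
		GrantWrite:       aws.String(`id="` + writerID + `"`),
	})
	return err
}
```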
@@ -29734,7 +29745,7 @@ type PutObjectAclInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -29752,7 +29763,7 @@ type PutObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -29944,7 +29955,7 @@ type PutObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -30046,14 +30057,15 @@ type PutObjectInput struct { // The Object Lock mode that you want to apply to this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want this object's Object Lock to expire. + // The date and time when you want this object's Object Lock to expire. Must + // be formatted as a timestamp parameter. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -30080,13 +30092,11 @@ type PutObjectInput struct { // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the AWS Key Management Service (AWS KMS) // symmetrical customer managed customer master key (CMK) that was used for - // the object. 
- // - // If the value of x-amz-server-side-encryption is aws:kms, this header specifies - // the ID of the symmetric customer managed AWS KMS CMK that will be used for // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS - // managed CMK in AWS to protect the data. + // managed CMK in AWS to protect the data. If the KMS key does not exist in + // the same account issuing the command, you must use the full ARN and not just + // the ID. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -30098,7 +30108,7 @@ type PutObjectInput struct { // Depending on performance needs, you can specify a different Storage Class. // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 Service Developer Guide. + // in the Amazon S3 User Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -30401,7 +30411,7 @@ type PutObjectLegalHoldInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -30425,7 +30435,7 @@ type PutObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a Legal Hold on. @@ -30578,7 +30588,7 @@ type PutObjectLockConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // A token to allow Object Lock to be enabled for an existing bucket. @@ -30831,7 +30841,7 @@ type PutObjectRetentionInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -30855,7 +30865,7 @@ type PutObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The container element for the Object Retention configuration. @@ -31007,7 +31017,7 @@ type PutObjectTaggingInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -31035,7 +31045,7 @@ type PutObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for the TagSet and Tag elements @@ -31752,7 +31762,7 @@ type ReplicationRule struct { // Optional configuration to replicate existing source bucket objects. For more // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. ExistingObjectReplication *ExistingObjectReplication `type:"structure"` // A filter that identifies the subset of objects to which the replication rule @@ -32195,7 +32205,7 @@ type RestoreObjectInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. 
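As a side note to the SSEKMSKeyId wording above (the AWS managed key is used when no key ID is given, and a key owned by another account must be referenced by its full ARN), a hedged sketch of an aws:kms upload; the bucket name is a placeholder and the key ARN reuses the documentation example.

```go
package example

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putWithKMSKey uploads an object encrypted with a specific symmetric KMS key.
// When the key belongs to a different account, the full key ARN (not the bare
// key ID) must be supplied.
func putWithKMSKey(svc *s3.S3, bucket, key string, body []byte) error {
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 bytes.NewReader(body),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		// Omitting SSEKMSKeyId would fall back to the AWS managed key for S3.
		SSEKMSKeyId: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	return err
}
```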
// // When using this action with Amazon S3 on Outposts, you must direct requests @@ -32223,7 +32233,7 @@ type RestoreObjectInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. @@ -32540,8 +32550,8 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { // Specifies lifecycle rules for an Amazon S3 bucket. For more information, // see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon Simple Storage Service API Reference. For examples, see Put -// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). +// in the Amazon S3 API Reference. For examples, see Put Bucket Lifecycle Configuration +// Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). type Rule struct { _ struct{} `type:"structure"` @@ -33287,17 +33297,17 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. For more information, see PUT Bucket // encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon Simple Storage Service API Reference. +// in the Amazon S3 API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // AWS Key Management Service (KMS) customer master key ID to use for the default + // AWS Key Management Service (KMS) customer AWS KMS key ID to use for the default // encryption. This parameter is allowed if and only if SSEAlgorithm is set // to aws:kms. // - // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. // However, if you are using encryption with cross-account operations, you must - // use a fully qualified CMK ARN. For more information, see Using encryption + // use a fully qualified KMS key ARN. For more information, see Using encryption // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). // // For example: @@ -33306,8 +33316,8 @@ type ServerSideEncryptionByDefault struct { // // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // - // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more - // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For + // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the AWS Key Management Service Developer Guide. 
KMSMasterKeyID *string `type:"string" sensitive:"true"` @@ -33531,7 +33541,7 @@ type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using a customer master key (CMK) stored in AWS Key Management Service. + // using an AWS KMS key stored in AWS Key Management Service. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -34170,7 +34180,7 @@ type UploadPartCopyInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -34275,7 +34285,7 @@ type UploadPartCopyInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -34612,7 +34622,7 @@ type UploadPartInput struct { // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the AWS SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // When using this action with Amazon S3 on Outposts, you must direct requests @@ -34655,7 +34665,7 @@ type UploadPartInput struct { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 Developer Guide. + // in the Amazon S3 User Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use to when encrypting the object (for example, @@ -34919,7 +34929,7 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { // Describes the versioning state of an Amazon S3 bucket. For more information, // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon Simple Storage Service API Reference. +// in the Amazon S3 API Reference. 
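The same key-ID-versus-ARN rules apply to bucket default encryption through ServerSideEncryptionByDefault above. A hedged sketch, with a placeholder bucket and the key ARN taken from the documentation example:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultKMSEncryption makes aws:kms with a specific symmetric KMS key the
// bucket default, so PUTs that carry no encryption headers are still encrypted.
func setDefaultKMSEncryption(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
			}},
		},
	})
	return err
}
```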
type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -36028,6 +36038,9 @@ const ( // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" + + // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value + InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" ) // InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum @@ -36044,6 +36057,7 @@ func InventoryOptionalField_Values() []string { InventoryOptionalFieldObjectLockMode, InventoryOptionalFieldObjectLockLegalHoldStatus, InventoryOptionalFieldIntelligentTieringAccessTier, + InventoryOptionalFieldBucketKeyStatus, } } @@ -36477,7 +36491,7 @@ func RequestCharged_Values() []string { // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) -// in the Amazon S3 Developer Guide. +// in the Amazon S3 User Guide. const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index 9fc2105fd2f..ba1a84d091f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -155,8 +155,9 @@ func endpointHandler(req *request.Request) { } case arn.OutpostAccessPointARN: // outposts does not support FIPS regions - if resReq.ResourceConfiguredForFIPS() { - req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil) + if resReq.UseFIPS() { + req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, + aws.StringValue(req.Config.Region), nil) return } diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 00000000000..23a0ada2fbb --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 00000000000..a0f4837a02c --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,225 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// Print prints a message to the local systemd journal using Send(). 
+func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 00000000000..e06d2081865 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 00000000000..b39ddfa5cbd --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 00000000000..f79dbfca5cb --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. + +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. + +##### Log levels have specific meanings: + + * Critical: Unrecoverable. Must fail. + * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost + * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. + * Notice: Normal, but important (uncommon) log information. + * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. + * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. + * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. 
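To make the principles above concrete, a hedged usage sketch; it assumes the package-logger helpers exposed elsewhere in capnslog (NewPackageLogger, SetFormatter, SetGlobalLogLevel), and the repository and package names are placeholders.

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One log object per package, registered under its repository and package name.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// package main owns the single output stream, the formatter, and the levels.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("service starting, pid=%d", os.Getpid())
	plog.Debugf("suppressed at the INFO level")
}
```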
+ diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 00000000000..b305a845fb2 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) 
+ prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 00000000000..426603ef305 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" 
+ line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 00000000000..38ce6d26184 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "io" + "os" + "syscall" +) + +// Here's where the opinionation comes in. We need some sensible defaults, +// especially after taking over the log package. Your project (whatever it may +// be) may see things differently. That's okay; there should be no defaults in +// the main package that cannot be controlled or overridden programatically, +// otherwise it's a bug. Doing so is creating your own init_log.go file much +// like this one. + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewDefaultFormatter(os.Stderr)) + SetGlobalLogLevel(INFO) +} + +func NewDefaultFormatter(out io.Writer) Formatter { + if syscall.Getppid() == 1 { + // We're running under init, which may be systemd. + f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 00000000000..45530506537 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 00000000000..72e05207c52 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 00000000000..970086b9f97 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 00000000000..226b60c2253 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. +func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. 
+func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 00000000000..00ff37149ae --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,191 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +// SetLevel allows users to change the current logging level. +func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) 
+ p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 00000000000..4be5a1f2de3 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/go-kit/log/.gitignore b/vendor/github.com/go-kit/log/.gitignore new file mode 100644 index 00000000000..66fd13c903c --- /dev/null +++ b/vendor/github.com/go-kit/log/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/go-kit/log/LICENSE b/vendor/github.com/go-kit/log/LICENSE new file mode 100644 index 00000000000..bb5bdb9cb8c --- /dev/null +++ b/vendor/github.com/go-kit/log/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Go kit + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md new file mode 100644 index 00000000000..a0931951df8 --- /dev/null +++ b/vendor/github.com/go-kit/log/README.md @@ -0,0 +1,151 @@ +# package log + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. 
Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Levels + +Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/log/level). + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). 
+Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/log/level), +are of course welcome. Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. + +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/log/doc.go b/vendor/github.com/go-kit/log/doc.go new file mode 100644 index 00000000000..f744382fe49 --- /dev/null +++ b/vendor/github.com/go-kit/log/doc.go @@ -0,0 +1,116 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With, WithPrefix, and WithSuffix +// add context to a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... 
+// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. +// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. 
+// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/log/go.mod b/vendor/github.com/go-kit/log/go.mod new file mode 100644 index 00000000000..7c87949ef78 --- /dev/null +++ b/vendor/github.com/go-kit/log/go.mod @@ -0,0 +1,8 @@ +module github.com/go-kit/log + +go 1.16 + +require ( + github.com/go-logfmt/logfmt v0.5.0 + github.com/go-stack/stack v1.8.0 +) diff --git a/vendor/github.com/go-kit/log/go.sum b/vendor/github.com/go-kit/log/go.sum new file mode 100644 index 00000000000..59a7cf6c277 --- /dev/null +++ b/vendor/github.com/go-kit/log/go.sum @@ -0,0 +1,4 @@ +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go new file mode 100644 index 00000000000..0cedbf82478 --- /dev/null +++ b/vendor/github.com/go-kit/log/json_logger.go @@ -0,0 +1,91 @@ +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. +func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + enc := json.NewEncoder(l.Writer) + enc.SetEscapeHTML(false) + return enc.Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. 
+ switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go new file mode 100644 index 00000000000..505d307b11b --- /dev/null +++ b/vendor/github.com/go-kit/log/level/doc.go @@ -0,0 +1,22 @@ +// Package level implements leveled logging on top of Go kit's log package. To +// use the level package, create a logger as per normal in your func main, and +// wrap it with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. +// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. +package level diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go new file mode 100644 index 00000000000..c94756c6bea --- /dev/null +++ b/vendor/github.com/go-kit/log/level/level.go @@ -0,0 +1,205 @@ +package level + +import "github.com/go-kit/log" + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. +func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. +func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. 
+func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. +func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. 
+// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Debug. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interface{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. + key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/log/log.go b/vendor/github.com/go-kit/log/log.go new file mode 100644 index 00000000000..62e11adace5 --- /dev/null +++ b/vendor/github.com/go-kit/log/log.go @@ -0,0 +1,179 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With, +// WithPrefix, or WithSuffix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. 
+ keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + sKeyvals: l.sKeyvals, + sHasValuer: l.sHasValuer, + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + sKeyvals: l.sKeyvals, + sHasValuer: l.sHasValuer, + } +} + +// WithSuffix returns a new contextual logger with keyvals appended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithSuffix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.sKeyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(l.sKeyvals, kvs...) + return &context{ + logger: l.logger, + keyvals: l.keyvals, + hasValuer: l.hasValuer, + sKeyvals: kvs, + sHasValuer: l.sHasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With, WithPrefix, and +// WithSuffix. It wraps a Logger and holds keyvals that it includes in all +// log events. Its Log method calls bindValues to generate values for each +// Valuer in the context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. 
With, WithPrefix, and WithSuffix avoid introducing an additional +// layer by returning a newly constructed context with a merged keyvals +// rather than simply wrapping the existing context. +type context struct { + logger Logger + keyvals []interface{} + sKeyvals []interface{} // suffixes + hasValuer bool + sHasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:(len(l.keyvals))]) + } + kvs = append(kvs, l.sKeyvals...) + if l.sHasValuer { + bindValues(kvs[len(kvs)-len(l.sKeyvals):]) + } + return l.logger.Log(kvs...) +} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) +} diff --git a/vendor/github.com/go-kit/log/logfmt_logger.go b/vendor/github.com/go-kit/log/logfmt_logger.go new file mode 100644 index 00000000000..a00305298b8 --- /dev/null +++ b/vendor/github.com/go-kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/log/nop_logger.go b/vendor/github.com/go-kit/log/nop_logger.go new file mode 100644 index 00000000000..1047d626c43 --- /dev/null +++ b/vendor/github.com/go-kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/log/stdlib.go b/vendor/github.com/go-kit/log/stdlib.go new file mode 100644 index 00000000000..0338edbe2ba --- /dev/null +++ b/vendor/github.com/go-kit/log/stdlib.go @@ -0,0 +1,151 @@ +package log + +import ( + "bytes" + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. +func (w StdlibWriter) Write(p []byte) (int, error) { + log.Print(strings.TrimSpace(string(p))) + return len(p), nil +} + +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib +// logger's SetOutput. It will extract date/timestamps, filenames, and +// messages, and place them under relevant keys. +type StdlibAdapter struct { + Logger + timestampKey string + fileKey string + messageKey string + prefix string + joinPrefixToMsg bool +} + +// StdlibAdapterOption sets a parameter for the StdlibAdapter. +type StdlibAdapterOption func(*StdlibAdapter) + +// TimestampKey sets the key for the timestamp field. By default, it's "ts". +func TimestampKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.timestampKey = key } +} + +// FileKey sets the key for the file and line field. By default, it's "caller". +func FileKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.fileKey = key } +} + +// MessageKey sets the key for the actual log message. By default, it's "msg". +func MessageKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.messageKey = key } +} + +// Prefix configures the adapter to parse a prefix from stdlib log events. If +// you provide a non-empty prefix to the stdlib logger, then your should provide +// that same prefix to the adapter via this option. +// +// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to +// true if you want to include the parsed prefix in the msg. +func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.prefix = prefix; a.joinPrefixToMsg = joinPrefixToMsg } +} + +// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed +// logger. It's designed to be passed to log.SetOutput. 
+func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { + a := StdlibAdapter{ + Logger: logger, + timestampKey: "ts", + fileKey: "caller", + messageKey: "msg", + } + for _, option := range options { + option(&a) + } + return a +} + +func (a StdlibAdapter) Write(p []byte) (int, error) { + p = a.handlePrefix(p) + + result := subexps(p) + keyvals := []interface{}{} + var timestamp string + if date, ok := result["date"]; ok && date != "" { + timestamp = date + } + if time, ok := result["time"]; ok && time != "" { + if timestamp != "" { + timestamp += " " + } + timestamp += time + } + if timestamp != "" { + keyvals = append(keyvals, a.timestampKey, timestamp) + } + if file, ok := result["file"]; ok && file != "" { + keyvals = append(keyvals, a.fileKey, file) + } + if msg, ok := result["msg"]; ok { + msg = a.handleMessagePrefix(msg) + keyvals = append(keyvals, a.messageKey, msg) + } + if err := a.Logger.Log(keyvals...); err != nil { + return 0, err + } + return len(p), nil +} + +func (a StdlibAdapter) handlePrefix(p []byte) []byte { + if a.prefix != "" { + p = bytes.TrimPrefix(p, []byte(a.prefix)) + } + return p +} + +func (a StdlibAdapter) handleMessagePrefix(msg string) string { + if a.prefix == "" { + return msg + } + + msg = strings.TrimPrefix(msg, a.prefix) + if a.joinPrefixToMsg { + msg = a.prefix + msg + } + return msg +} + +const ( + logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` + logRegexpTime = `(?P